1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 74b6c6d..eac0e77 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 4d68ec8..9546b75 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1203,6 +1203,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333@@ -2300,6 +2307,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344@@ -2601,6 +2612,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
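
For illustration only (not part of the patch): a boot command line combining the parameters documented above, enabling full slab sanitization, disabling the sysfs restriction, and passing a hypothetical GID of 1001 for the /proc exemption, could look like:

	linux /vmlinuz root=/dev/sda1 pax_sanitize_slab=full grsec_sysfs_restrict=0 grsec_proc_gid=1001
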
375diff --git a/Makefile b/Makefile
376index 3d16bcc..a3b342e 100644
377--- a/Makefile
378+++ b/Makefile
379@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
380 HOSTCC = gcc
381 HOSTCXX = g++
382 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
383-HOSTCXXFLAGS = -O2
384+HOSTCFLAGS += -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
385+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
386+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
387
388 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
389 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
390@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
391 # Rules shared between *config targets and build targets
392
393 # Basic helpers built in scripts/
394-PHONY += scripts_basic
395-scripts_basic:
396+PHONY += scripts_basic gcc-plugins
397+scripts_basic: gcc-plugins
398 $(Q)$(MAKE) $(build)=scripts/basic
399 $(Q)rm -f .tmp_quiet_recordmcount
400
401@@ -622,6 +624,72 @@ endif
402 # Tell gcc to never replace conditional load with a non-conditional one
403 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
404
405+ifndef DISABLE_PAX_PLUGINS
406+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
408+else
409+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
410+endif
411+ifneq ($(PLUGINCC),)
412+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
413+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
414+endif
415+ifdef CONFIG_PAX_MEMORY_STACKLEAK
416+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
417+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
418+endif
419+ifdef CONFIG_KALLOCSTAT_PLUGIN
420+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
421+endif
422+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
424+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
425+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
426+endif
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
428+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
429+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
430+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
431+endif
432+endif
433+ifdef CONFIG_CHECKER_PLUGIN
434+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
435+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
436+endif
437+endif
438+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
439+ifdef CONFIG_PAX_SIZE_OVERFLOW
440+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
441+endif
442+ifdef CONFIG_PAX_LATENT_ENTROPY
443+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
444+endif
445+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
446+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
447+endif
448+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
450+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
451+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
452+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
453+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
454+ifeq ($(KBUILD_EXTMOD),)
455+gcc-plugins:
456+ $(Q)$(MAKE) $(build)=tools/gcc
457+else
458+gcc-plugins: ;
459+endif
460+else
461+gcc-plugins:
462+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
463+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
464+else
465+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
466+endif
467+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
468+endif
469+endif
470+
471 ifdef CONFIG_READABLE_ASM
472 # Disable optimizations that make assembler listings hard to read.
473 # reorder blocks reorders the control in the function
474@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
475 else
476 KBUILD_CFLAGS += -g
477 endif
478-KBUILD_AFLAGS += -Wa,-gdwarf-2
479+KBUILD_AFLAGS += -Wa,--gdwarf-2
480 endif
481 ifdef CONFIG_DEBUG_INFO_DWARF4
482 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
483@@ -884,7 +952,7 @@ export mod_sign_cmd
484
485
486 ifeq ($(KBUILD_EXTMOD),)
487-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
488+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
489
490 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
491 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
492@@ -934,6 +1002,8 @@ endif
493
494 # The actual objects are generated when descending,
495 # make sure no implicit rule kicks in
496+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
497+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
498 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499
500 # Handle descending into subdirectories listed in $(vmlinux-dirs)
501@@ -943,7 +1013,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
502 # Error messages still appears in the original language
503
504 PHONY += $(vmlinux-dirs)
505-$(vmlinux-dirs): prepare scripts
506+$(vmlinux-dirs): gcc-plugins prepare scripts
507 $(Q)$(MAKE) $(build)=$@
508
509 define filechk_kernel.release
510@@ -986,10 +1056,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
511
512 archprepare: archheaders archscripts prepare1 scripts_basic
513
514+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
515+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
516 prepare0: archprepare FORCE
517 $(Q)$(MAKE) $(build)=.
518
519 # All the preparing..
520+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
521 prepare: prepare0
522
523 # Generate some files
524@@ -1103,6 +1176,8 @@ all: modules
525 # using awk while concatenating to the final file.
526
527 PHONY += modules
528+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
529+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
530 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
531 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
532 @$(kecho) ' Building modules, stage 2.';
533@@ -1118,7 +1193,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
534
535 # Target to prepare building external modules
536 PHONY += modules_prepare
537-modules_prepare: prepare scripts
538+modules_prepare: gcc-plugins prepare scripts
539
540 # Target to install modules
541 PHONY += modules_install
542@@ -1184,7 +1259,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
543 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
544 signing_key.priv signing_key.x509 x509.genkey \
545 extra_certificates signing_key.x509.keyid \
546- signing_key.x509.signer vmlinux-gdb.py
547+ signing_key.x509.signer vmlinux-gdb.py \
548+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
549+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
550+ tools/gcc/randomize_layout_seed.h
551
552 # clean - Delete most, but leave enough to build external modules
553 #
554@@ -1223,7 +1301,7 @@ distclean: mrproper
555 @find $(srctree) $(RCS_FIND_IGNORE) \
556 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
557 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
558- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
559+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
560 -type f -print | xargs rm -f
561
562
563@@ -1389,6 +1467,8 @@ PHONY += $(module-dirs) modules
564 $(module-dirs): crmodverdir $(objtree)/Module.symvers
565 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
566
567+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
568+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
569 modules: $(module-dirs)
570 @$(kecho) ' Building modules, stage 2.';
571 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
572@@ -1529,17 +1609,21 @@ else
573 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
574 endif
575
576-%.s: %.c prepare scripts FORCE
577+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
578+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
579+%.s: %.c gcc-plugins prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581 %.i: %.c prepare scripts FORCE
582 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
583-%.o: %.c prepare scripts FORCE
584+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
585+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
586+%.o: %.c gcc-plugins prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588 %.lst: %.c prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.s: %.S prepare scripts FORCE
591+%.s: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593-%.o: %.S prepare scripts FORCE
594+%.o: %.S gcc-plugins prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596 %.symtypes: %.c prepare scripts FORCE
597 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
598@@ -1551,11 +1635,15 @@ endif
599 $(build)=$(build-dir)
600 # Make sure the latest headers are built for Documentation
601 Documentation/: headers_install
602-%/: prepare scripts FORCE
603+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
604+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
605+%/: gcc-plugins prepare scripts FORCE
606 $(cmd_crmodverdir)
607 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
608 $(build)=$(build-dir)
609-%.ko: prepare scripts FORCE
610+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
611+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
612+%.ko: gcc-plugins prepare scripts FORCE
613 $(cmd_crmodverdir)
614 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
615 $(build)=$(build-dir) $(@:.ko=.o)
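
As the $(error ...) message above spells out, the build can still proceed on a toolchain that lacks the gcc plugin headers, giving up the plugin-based hardening; a sketch of such an invocation:

	make DISABLE_PAX_PLUGINS=y bzImage modules
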
616diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
617index 8f8eafb..3405f46 100644
618--- a/arch/alpha/include/asm/atomic.h
619+++ b/arch/alpha/include/asm/atomic.h
620@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
621 #define atomic_dec(v) atomic_sub(1,(v))
622 #define atomic64_dec(v) atomic64_sub(1,(v))
623
624+#define atomic64_read_unchecked(v) atomic64_read(v)
625+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
626+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
627+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
628+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
629+#define atomic64_inc_unchecked(v) atomic64_inc(v)
630+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
631+#define atomic64_dec_unchecked(v) atomic64_dec(v)
632+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
633+
634 #endif /* _ALPHA_ATOMIC_H */
635diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
636index ad368a9..fbe0f25 100644
637--- a/arch/alpha/include/asm/cache.h
638+++ b/arch/alpha/include/asm/cache.h
639@@ -4,19 +4,19 @@
640 #ifndef __ARCH_ALPHA_CACHE_H
641 #define __ARCH_ALPHA_CACHE_H
642
643+#include <linux/const.h>
644
645 /* Bytes per L1 (data) cache line. */
646 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
647-# define L1_CACHE_BYTES 64
648 # define L1_CACHE_SHIFT 6
649 #else
650 /* Both EV4 and EV5 are write-through, read-allocate,
651 direct-mapped, physical.
652 */
653-# define L1_CACHE_BYTES 32
654 # define L1_CACHE_SHIFT 5
655 #endif
656
657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
658 #define SMP_CACHE_BYTES L1_CACHE_BYTES
659
660 #endif
661diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
662index 968d999..d36b2df 100644
663--- a/arch/alpha/include/asm/elf.h
664+++ b/arch/alpha/include/asm/elf.h
665@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
666
667 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
668
669+#ifdef CONFIG_PAX_ASLR
670+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
671+
672+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
673+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
674+#endif
675+
676 /* $0 is set by ld.so to a pointer to a function which might be
677 registered using atexit. This provides a mean for the dynamic
678 linker to call DT_FINI functions for shared libraries that have
679diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
680index aab14a0..b4fa3e7 100644
681--- a/arch/alpha/include/asm/pgalloc.h
682+++ b/arch/alpha/include/asm/pgalloc.h
683@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
684 pgd_set(pgd, pmd);
685 }
686
687+static inline void
688+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
689+{
690+ pgd_populate(mm, pgd, pmd);
691+}
692+
693 extern pgd_t *pgd_alloc(struct mm_struct *mm);
694
695 static inline void
696diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
697index a9a1195..e9b8417 100644
698--- a/arch/alpha/include/asm/pgtable.h
699+++ b/arch/alpha/include/asm/pgtable.h
700@@ -101,6 +101,17 @@ struct vm_area_struct;
701 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
702 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
703 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
704+
705+#ifdef CONFIG_PAX_PAGEEXEC
706+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
707+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
708+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
709+#else
710+# define PAGE_SHARED_NOEXEC PAGE_SHARED
711+# define PAGE_COPY_NOEXEC PAGE_COPY
712+# define PAGE_READONLY_NOEXEC PAGE_READONLY
713+#endif
714+
715 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
716
717 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
718diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
719index 2fd00b7..cfd5069 100644
720--- a/arch/alpha/kernel/module.c
721+++ b/arch/alpha/kernel/module.c
722@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
723
724 /* The small sections were sorted to the end of the segment.
725 The following should definitely cover them. */
726- gp = (u64)me->module_core + me->core_size - 0x8000;
727+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
728 got = sechdrs[me->arch.gotsecindex].sh_addr;
729
730 for (i = 0; i < n; i++) {
731diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
732index e51f578..16c64a3 100644
733--- a/arch/alpha/kernel/osf_sys.c
734+++ b/arch/alpha/kernel/osf_sys.c
735@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
736 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
737
738 static unsigned long
739-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
740- unsigned long limit)
741+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
742+ unsigned long limit, unsigned long flags)
743 {
744 struct vm_unmapped_area_info info;
745+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
746
747 info.flags = 0;
748 info.length = len;
749@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
750 info.high_limit = limit;
751 info.align_mask = 0;
752 info.align_offset = 0;
753+ info.threadstack_offset = offset;
754 return vm_unmapped_area(&info);
755 }
756
757@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
758 merely specific addresses, but regions of memory -- perhaps
759 this feature should be incorporated into all ports? */
760
761+#ifdef CONFIG_PAX_RANDMMAP
762+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
763+#endif
764+
765 if (addr) {
766- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
767+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
768 if (addr != (unsigned long) -ENOMEM)
769 return addr;
770 }
771
772 /* Next, try allocating at TASK_UNMAPPED_BASE. */
773- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
774- len, limit);
775+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
776+
777 if (addr != (unsigned long) -ENOMEM)
778 return addr;
779
780 /* Finally, try allocating in low memory. */
781- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
782+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
783
784 return addr;
785 }
786diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
787index 9d0ac09..479a962 100644
788--- a/arch/alpha/mm/fault.c
789+++ b/arch/alpha/mm/fault.c
790@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
791 __reload_thread(pcb);
792 }
793
794+#ifdef CONFIG_PAX_PAGEEXEC
795+/*
796+ * PaX: decide what to do with offenders (regs->pc = fault address)
797+ *
798+ * returns 1 when task should be killed
799+ * 2 when patched PLT trampoline was detected
800+ * 3 when unpatched PLT trampoline was detected
801+ */
802+static int pax_handle_fetch_fault(struct pt_regs *regs)
803+{
804+
805+#ifdef CONFIG_PAX_EMUPLT
806+ int err;
807+
808+ do { /* PaX: patched PLT emulation #1 */
809+ unsigned int ldah, ldq, jmp;
810+
811+ err = get_user(ldah, (unsigned int *)regs->pc);
812+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
813+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
814+
815+ if (err)
816+ break;
817+
818+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
819+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
820+ jmp == 0x6BFB0000U)
821+ {
822+ unsigned long r27, addr;
823+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
824+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
825+
826+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
827+ err = get_user(r27, (unsigned long *)addr);
828+ if (err)
829+ break;
830+
831+ regs->r27 = r27;
832+ regs->pc = r27;
833+ return 2;
834+ }
835+ } while (0);
836+
837+ do { /* PaX: patched PLT emulation #2 */
838+ unsigned int ldah, lda, br;
839+
840+ err = get_user(ldah, (unsigned int *)regs->pc);
841+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
842+ err |= get_user(br, (unsigned int *)(regs->pc+8));
843+
844+ if (err)
845+ break;
846+
847+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
848+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
849+ (br & 0xFFE00000U) == 0xC3E00000U)
850+ {
851+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
852+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
853+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
854+
855+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
856+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
857+ return 2;
858+ }
859+ } while (0);
860+
861+ do { /* PaX: unpatched PLT emulation */
862+ unsigned int br;
863+
864+ err = get_user(br, (unsigned int *)regs->pc);
865+
866+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
867+ unsigned int br2, ldq, nop, jmp;
868+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
869+
870+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
871+ err = get_user(br2, (unsigned int *)addr);
872+ err |= get_user(ldq, (unsigned int *)(addr+4));
873+ err |= get_user(nop, (unsigned int *)(addr+8));
874+ err |= get_user(jmp, (unsigned int *)(addr+12));
875+ err |= get_user(resolver, (unsigned long *)(addr+16));
876+
877+ if (err)
878+ break;
879+
880+ if (br2 == 0xC3600000U &&
881+ ldq == 0xA77B000CU &&
882+ nop == 0x47FF041FU &&
883+ jmp == 0x6B7B0000U)
884+ {
885+ regs->r28 = regs->pc+4;
886+ regs->r27 = addr+16;
887+ regs->pc = resolver;
888+ return 3;
889+ }
890+ }
891+ } while (0);
892+#endif
893+
894+ return 1;
895+}
896+
897+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
898+{
899+ unsigned long i;
900+
901+ printk(KERN_ERR "PAX: bytes at PC: ");
902+ for (i = 0; i < 5; i++) {
903+ unsigned int c;
904+ if (get_user(c, (unsigned int *)pc+i))
905+ printk(KERN_CONT "???????? ");
906+ else
907+ printk(KERN_CONT "%08x ", c);
908+ }
909+ printk("\n");
910+}
911+#endif
912
913 /*
914 * This routine handles page faults. It determines the address,
915@@ -133,8 +251,29 @@ retry:
916 good_area:
917 si_code = SEGV_ACCERR;
918 if (cause < 0) {
919- if (!(vma->vm_flags & VM_EXEC))
920+ if (!(vma->vm_flags & VM_EXEC)) {
921+
922+#ifdef CONFIG_PAX_PAGEEXEC
923+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
924+ goto bad_area;
925+
926+ up_read(&mm->mmap_sem);
927+ switch (pax_handle_fetch_fault(regs)) {
928+
929+#ifdef CONFIG_PAX_EMUPLT
930+ case 2:
931+ case 3:
932+ return;
933+#endif
934+
935+ }
936+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
937+ do_group_exit(SIGKILL);
938+#else
939 goto bad_area;
940+#endif
941+
942+ }
943 } else if (!cause) {
944 /* Allow reads even for write-only mappings */
945 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
946diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
947index cf4c0c9..a87ecf5 100644
948--- a/arch/arm/Kconfig
949+++ b/arch/arm/Kconfig
950@@ -1735,7 +1735,7 @@ config ALIGNMENT_TRAP
951
952 config UACCESS_WITH_MEMCPY
953 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
954- depends on MMU
955+ depends on MMU && !PAX_MEMORY_UDEREF
956 default y if CPU_FEROCEON
957 help
958 Implement faster copy_to_user and clear_user methods for CPU
959@@ -1999,6 +1999,7 @@ config XIP_PHYS_ADDR
960 config KEXEC
961 bool "Kexec system call (EXPERIMENTAL)"
962 depends on (!SMP || PM_SLEEP_SMP)
963+ depends on !GRKERNSEC_KMEM
964 help
965 kexec is a system call that implements the ability to shutdown your
966 current kernel, and to start another kernel. It is like a reboot
967diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
968index e22c119..abe7041 100644
969--- a/arch/arm/include/asm/atomic.h
970+++ b/arch/arm/include/asm/atomic.h
971@@ -18,17 +18,41 @@
972 #include <asm/barrier.h>
973 #include <asm/cmpxchg.h>
974
975+#ifdef CONFIG_GENERIC_ATOMIC64
976+#include <asm-generic/atomic64.h>
977+#endif
978+
979 #define ATOMIC_INIT(i) { (i) }
980
981 #ifdef __KERNEL__
982
983+#ifdef CONFIG_THUMB2_KERNEL
984+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
985+#else
986+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
987+#endif
988+
989+#define _ASM_EXTABLE(from, to) \
990+" .pushsection __ex_table,\"a\"\n"\
991+" .align 3\n" \
992+" .long " #from ", " #to"\n" \
993+" .popsection"
994+
995 /*
996 * On ARM, ordinary assignment (str instruction) doesn't clear the local
997 * strex/ldrex monitor on some implementations. The reason we can use it for
998 * atomic_set() is the clrex or dummy strex done on every exception return.
999 */
1000 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1001+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1002+{
1003+ return ACCESS_ONCE(v->counter);
1004+}
1005 #define atomic_set(v,i) (((v)->counter) = (i))
1006+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1007+{
1008+ v->counter = i;
1009+}
1010
1011 #if __LINUX_ARM_ARCH__ >= 6
1012
1013@@ -38,26 +62,50 @@
1014 * to ensure that the update happens.
1015 */
1016
1017-#define ATOMIC_OP(op, c_op, asm_op) \
1018-static inline void atomic_##op(int i, atomic_t *v) \
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+#define __OVERFLOW_POST \
1021+ " bvc 3f\n" \
1022+ "2: " REFCOUNT_TRAP_INSN "\n"\
1023+ "3:\n"
1024+#define __OVERFLOW_POST_RETURN \
1025+ " bvc 3f\n" \
1026+" mov %0, %1\n" \
1027+ "2: " REFCOUNT_TRAP_INSN "\n"\
1028+ "3:\n"
1029+#define __OVERFLOW_EXTABLE \
1030+ "4:\n" \
1031+ _ASM_EXTABLE(2b, 4b)
1032+#else
1033+#define __OVERFLOW_POST
1034+#define __OVERFLOW_POST_RETURN
1035+#define __OVERFLOW_EXTABLE
1036+#endif
1037+
1038+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1039+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1040 { \
1041 unsigned long tmp; \
1042 int result; \
1043 \
1044 prefetchw(&v->counter); \
1045- __asm__ __volatile__("@ atomic_" #op "\n" \
1046+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1047 "1: ldrex %0, [%3]\n" \
1048 " " #asm_op " %0, %0, %4\n" \
1049+ post_op \
1050 " strex %1, %0, [%3]\n" \
1051 " teq %1, #0\n" \
1052-" bne 1b" \
1053+" bne 1b\n" \
1054+ extable \
1055 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1056 : "r" (&v->counter), "Ir" (i) \
1057 : "cc"); \
1058 } \
1059
1060-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1061-static inline int atomic_##op##_return(int i, atomic_t *v) \
1062+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1063+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1064+
1065+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1066+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1067 { \
1068 unsigned long tmp; \
1069 int result; \
1070@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1071 smp_mb(); \
1072 prefetchw(&v->counter); \
1073 \
1074- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1075+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1076 "1: ldrex %0, [%3]\n" \
1077 " " #asm_op " %0, %0, %4\n" \
1078+ post_op \
1079 " strex %1, %0, [%3]\n" \
1080 " teq %1, #0\n" \
1081-" bne 1b" \
1082+" bne 1b\n" \
1083+ extable \
1084 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1085 : "r" (&v->counter), "Ir" (i) \
1086 : "cc"); \
1087@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1088 return result; \
1089 }
1090
1091+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1092+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1093+
1094 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1095 {
1096 int oldval;
1097@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1098 __asm__ __volatile__ ("@ atomic_add_unless\n"
1099 "1: ldrex %0, [%4]\n"
1100 " teq %0, %5\n"
1101-" beq 2f\n"
1102-" add %1, %0, %6\n"
1103+" beq 4f\n"
1104+" adds %1, %0, %6\n"
1105+
1106+#ifdef CONFIG_PAX_REFCOUNT
1107+" bvc 3f\n"
1108+"2: " REFCOUNT_TRAP_INSN "\n"
1109+"3:\n"
1110+#endif
1111+
1112 " strex %2, %1, [%4]\n"
1113 " teq %2, #0\n"
1114 " bne 1b\n"
1115-"2:"
1116+"4:"
1117+
1118+#ifdef CONFIG_PAX_REFCOUNT
1119+ _ASM_EXTABLE(2b, 4b)
1120+#endif
1121+
1122 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1123 : "r" (&v->counter), "r" (u), "r" (a)
1124 : "cc");
1125@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1126 return oldval;
1127 }
1128
1129+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1130+{
1131+ unsigned long oldval, res;
1132+
1133+ smp_mb();
1134+
1135+ do {
1136+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1137+ "ldrex %1, [%3]\n"
1138+ "mov %0, #0\n"
1139+ "teq %1, %4\n"
1140+ "strexeq %0, %5, [%3]\n"
1141+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1142+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1143+ : "cc");
1144+ } while (res);
1145+
1146+ smp_mb();
1147+
1148+ return oldval;
1149+}
1150+
1151 #else /* ARM_ARCH_6 */
1152
1153 #ifdef CONFIG_SMP
1154 #error SMP not supported on pre-ARMv6 CPUs
1155 #endif
1156
1157-#define ATOMIC_OP(op, c_op, asm_op) \
1158-static inline void atomic_##op(int i, atomic_t *v) \
1159+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1160+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1161 { \
1162 unsigned long flags; \
1163 \
1164@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1165 raw_local_irq_restore(flags); \
1166 } \
1167
1168-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1169-static inline int atomic_##op##_return(int i, atomic_t *v) \
1170+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1171+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1172+
1173+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1174+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1175 { \
1176 unsigned long flags; \
1177 int val; \
1178@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1179 return val; \
1180 }
1181
1182+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1183+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1184+
1185 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 {
1187 int ret;
1188@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1189 return ret;
1190 }
1191
1192+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1193+{
1194+ return atomic_cmpxchg((atomic_t *)v, old, new);
1195+}
1196+
1197 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1198 {
1199 int c, old;
1200@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1201
1202 #undef ATOMIC_OPS
1203 #undef ATOMIC_OP_RETURN
1204+#undef __ATOMIC_OP_RETURN
1205 #undef ATOMIC_OP
1206+#undef __ATOMIC_OP
1207
1208 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1209+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1210+{
1211+ return xchg(&v->counter, new);
1212+}
1213
1214 #define atomic_inc(v) atomic_add(1, v)
1215+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1216+{
1217+ atomic_add_unchecked(1, v);
1218+}
1219 #define atomic_dec(v) atomic_sub(1, v)
1220+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1221+{
1222+ atomic_sub_unchecked(1, v);
1223+}
1224
1225 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1226+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1227+{
1228+ return atomic_add_return_unchecked(1, v) == 0;
1229+}
1230 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1231 #define atomic_inc_return(v) (atomic_add_return(1, v))
1232+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1233+{
1234+ return atomic_add_return_unchecked(1, v);
1235+}
1236 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1237 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1238
1239@@ -216,6 +336,14 @@ typedef struct {
1240 long long counter;
1241 } atomic64_t;
1242
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+typedef struct {
1245+ long long counter;
1246+} atomic64_unchecked_t;
1247+#else
1248+typedef atomic64_t atomic64_unchecked_t;
1249+#endif
1250+
1251 #define ATOMIC64_INIT(i) { (i) }
1252
1253 #ifdef CONFIG_ARM_LPAE
1254@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1255 return result;
1256 }
1257
1258+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1259+{
1260+ long long result;
1261+
1262+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1263+" ldrd %0, %H0, [%1]"
1264+ : "=&r" (result)
1265+ : "r" (&v->counter), "Qo" (v->counter)
1266+ );
1267+
1268+ return result;
1269+}
1270+
1271 static inline void atomic64_set(atomic64_t *v, long long i)
1272 {
1273 __asm__ __volatile__("@ atomic64_set\n"
1274@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1275 : "r" (&v->counter), "r" (i)
1276 );
1277 }
1278+
1279+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1280+{
1281+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1282+" strd %2, %H2, [%1]"
1283+ : "=Qo" (v->counter)
1284+ : "r" (&v->counter), "r" (i)
1285+ );
1286+}
1287 #else
1288 static inline long long atomic64_read(const atomic64_t *v)
1289 {
1290@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1291 return result;
1292 }
1293
1294+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1295+{
1296+ long long result;
1297+
1298+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1299+" ldrexd %0, %H0, [%1]"
1300+ : "=&r" (result)
1301+ : "r" (&v->counter), "Qo" (v->counter)
1302+ );
1303+
1304+ return result;
1305+}
1306+
1307 static inline void atomic64_set(atomic64_t *v, long long i)
1308 {
1309 long long tmp;
1310@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1311 : "r" (&v->counter), "r" (i)
1312 : "cc");
1313 }
1314+
1315+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1316+{
1317+ long long tmp;
1318+
1319+ prefetchw(&v->counter);
1320+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1321+"1: ldrexd %0, %H0, [%2]\n"
1322+" strexd %0, %3, %H3, [%2]\n"
1323+" teq %0, #0\n"
1324+" bne 1b"
1325+ : "=&r" (tmp), "=Qo" (v->counter)
1326+ : "r" (&v->counter), "r" (i)
1327+ : "cc");
1328+}
1329 #endif
1330
1331-#define ATOMIC64_OP(op, op1, op2) \
1332-static inline void atomic64_##op(long long i, atomic64_t *v) \
1333+#undef __OVERFLOW_POST_RETURN
1334+#define __OVERFLOW_POST_RETURN \
1335+ " bvc 3f\n" \
1336+" mov %0, %1\n" \
1337+" mov %H0, %H1\n" \
1338+ "2: " REFCOUNT_TRAP_INSN "\n"\
1339+ "3:\n"
1340+
1341+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1342+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1343 { \
1344 long long result; \
1345 unsigned long tmp; \
1346 \
1347 prefetchw(&v->counter); \
1348- __asm__ __volatile__("@ atomic64_" #op "\n" \
1349+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1350 "1: ldrexd %0, %H0, [%3]\n" \
1351 " " #op1 " %Q0, %Q0, %Q4\n" \
1352 " " #op2 " %R0, %R0, %R4\n" \
1353+ post_op \
1354 " strexd %1, %0, %H0, [%3]\n" \
1355 " teq %1, #0\n" \
1356-" bne 1b" \
1357+" bne 1b\n" \
1358+ extable \
1359 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1360 : "r" (&v->counter), "r" (i) \
1361 : "cc"); \
1362 } \
1363
1364-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1365-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1366+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1367+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1368+
1369+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1370+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1371 { \
1372 long long result; \
1373 unsigned long tmp; \
1374@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1375 smp_mb(); \
1376 prefetchw(&v->counter); \
1377 \
1378- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1379+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1380 "1: ldrexd %0, %H0, [%3]\n" \
1381 " " #op1 " %Q0, %Q0, %Q4\n" \
1382 " " #op2 " %R0, %R0, %R4\n" \
1383+ post_op \
1384 " strexd %1, %0, %H0, [%3]\n" \
1385 " teq %1, #0\n" \
1386-" bne 1b" \
1387+" bne 1b\n" \
1388+ extable \
1389 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1390 : "r" (&v->counter), "r" (i) \
1391 : "cc"); \
1392@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1393 return result; \
1394 }
1395
1396+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1397+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1398+
1399 #define ATOMIC64_OPS(op, op1, op2) \
1400 ATOMIC64_OP(op, op1, op2) \
1401 ATOMIC64_OP_RETURN(op, op1, op2)
1402@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1403
1404 #undef ATOMIC64_OPS
1405 #undef ATOMIC64_OP_RETURN
1406+#undef __ATOMIC64_OP_RETURN
1407 #undef ATOMIC64_OP
1408+#undef __ATOMIC64_OP
1409+#undef __OVERFLOW_EXTABLE
1410+#undef __OVERFLOW_POST_RETURN
1411+#undef __OVERFLOW_POST
1412
1413 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1414 long long new)
1415@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1416 return oldval;
1417 }
1418
1419+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1420+ long long new)
1421+{
1422+ long long oldval;
1423+ unsigned long res;
1424+
1425+ smp_mb();
1426+
1427+ do {
1428+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1429+ "ldrexd %1, %H1, [%3]\n"
1430+ "mov %0, #0\n"
1431+ "teq %1, %4\n"
1432+ "teqeq %H1, %H4\n"
1433+ "strexdeq %0, %5, %H5, [%3]"
1434+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1435+ : "r" (&ptr->counter), "r" (old), "r" (new)
1436+ : "cc");
1437+ } while (res);
1438+
1439+ smp_mb();
1440+
1441+ return oldval;
1442+}
1443+
1444 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 {
1446 long long result;
1447@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1448 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1449 {
1450 long long result;
1451- unsigned long tmp;
1452+ u64 tmp;
1453
1454 smp_mb();
1455 prefetchw(&v->counter);
1456
1457 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1458-"1: ldrexd %0, %H0, [%3]\n"
1459-" subs %Q0, %Q0, #1\n"
1460-" sbc %R0, %R0, #0\n"
1461+"1: ldrexd %1, %H1, [%3]\n"
1462+" subs %Q0, %Q1, #1\n"
1463+" sbcs %R0, %R1, #0\n"
1464+
1465+#ifdef CONFIG_PAX_REFCOUNT
1466+" bvc 3f\n"
1467+" mov %Q0, %Q1\n"
1468+" mov %R0, %R1\n"
1469+"2: " REFCOUNT_TRAP_INSN "\n"
1470+"3:\n"
1471+#endif
1472+
1473 " teq %R0, #0\n"
1474-" bmi 2f\n"
1475+" bmi 4f\n"
1476 " strexd %1, %0, %H0, [%3]\n"
1477 " teq %1, #0\n"
1478 " bne 1b\n"
1479-"2:"
1480+"4:\n"
1481+
1482+#ifdef CONFIG_PAX_REFCOUNT
1483+ _ASM_EXTABLE(2b, 4b)
1484+#endif
1485+
1486 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1487 : "r" (&v->counter)
1488 : "cc");
1489@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1490 " teq %0, %5\n"
1491 " teqeq %H0, %H5\n"
1492 " moveq %1, #0\n"
1493-" beq 2f\n"
1494+" beq 4f\n"
1495 " adds %Q0, %Q0, %Q6\n"
1496-" adc %R0, %R0, %R6\n"
1497+" adcs %R0, %R0, %R6\n"
1498+
1499+#ifdef CONFIG_PAX_REFCOUNT
1500+" bvc 3f\n"
1501+"2: " REFCOUNT_TRAP_INSN "\n"
1502+"3:\n"
1503+#endif
1504+
1505 " strexd %2, %0, %H0, [%4]\n"
1506 " teq %2, #0\n"
1507 " bne 1b\n"
1508-"2:"
1509+"4:\n"
1510+
1511+#ifdef CONFIG_PAX_REFCOUNT
1512+ _ASM_EXTABLE(2b, 4b)
1513+#endif
1514+
1515 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1516 : "r" (&v->counter), "r" (u), "r" (a)
1517 : "cc");
1518@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1519
1520 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1521 #define atomic64_inc(v) atomic64_add(1LL, (v))
1522+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1523 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1524+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1525 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1526 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1527 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1528+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1529 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1530 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1531 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
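
A minimal sketch, not taken from the patch and using made-up names, of how the two counter flavours introduced above are meant to be used: with PAX_REFCOUNT enabled, the "adds"/"bvc"/REFCOUNT_TRAP_INSN sequences make plain atomic_t operations trap on signed overflow, while counters that are expected to wrap are declared with the _unchecked variants:

	static atomic_t map_refs = ATOMIC_INIT(1);		/* reference count: overflow traps under PAX_REFCOUNT */
	static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);	/* statistics counter: wrapping is harmless, never traps */

	static void count_rx(void)
	{
		atomic_inc_unchecked(&rx_packets);	/* plain, unchecked increment */
	}

	static void get_map(void)
	{
		atomic_inc(&map_refs);	/* checked increment: overflow raises REFCOUNT_TRAP_INSN */
	}
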
1532diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1533index d2f81e6..3c4dba5 100644
1534--- a/arch/arm/include/asm/barrier.h
1535+++ b/arch/arm/include/asm/barrier.h
1536@@ -67,7 +67,7 @@
1537 do { \
1538 compiletime_assert_atomic_type(*p); \
1539 smp_mb(); \
1540- ACCESS_ONCE(*p) = (v); \
1541+ ACCESS_ONCE_RW(*p) = (v); \
1542 } while (0)
1543
1544 #define smp_load_acquire(p) \
1545diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1546index 75fe66b..ba3dee4 100644
1547--- a/arch/arm/include/asm/cache.h
1548+++ b/arch/arm/include/asm/cache.h
1549@@ -4,8 +4,10 @@
1550 #ifndef __ASMARM_CACHE_H
1551 #define __ASMARM_CACHE_H
1552
1553+#include <linux/const.h>
1554+
1555 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1558
1559 /*
1560 * Memory returned by kmalloc() may be used for DMA, so we must make
1561@@ -24,5 +26,6 @@
1562 #endif
1563
1564 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1565+#define __read_only __attribute__ ((__section__(".data..read_only")))
1566
1567 #endif
1568diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1569index 2d46862..a35415b 100644
1570--- a/arch/arm/include/asm/cacheflush.h
1571+++ b/arch/arm/include/asm/cacheflush.h
1572@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1573 void (*dma_unmap_area)(const void *, size_t, int);
1574
1575 void (*dma_flush_range)(const void *, const void *);
1576-};
1577+} __no_const;
1578
1579 /*
1580 * Select the calling method
1581diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1582index 5233151..87a71fa 100644
1583--- a/arch/arm/include/asm/checksum.h
1584+++ b/arch/arm/include/asm/checksum.h
1585@@ -37,7 +37,19 @@ __wsum
1586 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1587
1588 __wsum
1589-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1590+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1591+
1592+static inline __wsum
1593+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1594+{
1595+ __wsum ret;
1596+ pax_open_userland();
1597+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1598+ pax_close_userland();
1599+ return ret;
1600+}
1601+
1602+
1603
1604 /*
1605 * Fold a partial checksum without adding pseudo headers
1606diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1607index abb2c37..96db950 100644
1608--- a/arch/arm/include/asm/cmpxchg.h
1609+++ b/arch/arm/include/asm/cmpxchg.h
1610@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1611
1612 #define xchg(ptr,x) \
1613 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1614+#define xchg_unchecked(ptr,x) \
1615+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1616
1617 #include <asm-generic/cmpxchg-local.h>
1618
1619diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1620index 6ddbe44..b5e38b1a 100644
1621--- a/arch/arm/include/asm/domain.h
1622+++ b/arch/arm/include/asm/domain.h
1623@@ -48,18 +48,37 @@
1624 * Domain types
1625 */
1626 #define DOMAIN_NOACCESS 0
1627-#define DOMAIN_CLIENT 1
1628 #ifdef CONFIG_CPU_USE_DOMAINS
1629+#define DOMAIN_USERCLIENT 1
1630+#define DOMAIN_KERNELCLIENT 1
1631 #define DOMAIN_MANAGER 3
1632+#define DOMAIN_VECTORS DOMAIN_USER
1633 #else
1634+
1635+#ifdef CONFIG_PAX_KERNEXEC
1636 #define DOMAIN_MANAGER 1
1637+#define DOMAIN_KERNEXEC 3
1638+#else
1639+#define DOMAIN_MANAGER 1
1640+#endif
1641+
1642+#ifdef CONFIG_PAX_MEMORY_UDEREF
1643+#define DOMAIN_USERCLIENT 0
1644+#define DOMAIN_UDEREF 1
1645+#define DOMAIN_VECTORS DOMAIN_KERNEL
1646+#else
1647+#define DOMAIN_USERCLIENT 1
1648+#define DOMAIN_VECTORS DOMAIN_USER
1649+#endif
1650+#define DOMAIN_KERNELCLIENT 1
1651+
1652 #endif
1653
1654 #define domain_val(dom,type) ((type) << (2*(dom)))
1655
1656 #ifndef __ASSEMBLY__
1657
1658-#ifdef CONFIG_CPU_USE_DOMAINS
1659+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1660 static inline void set_domain(unsigned val)
1661 {
1662 asm volatile(
1663@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1664 isb();
1665 }
1666
1667-#define modify_domain(dom,type) \
1668- do { \
1669- struct thread_info *thread = current_thread_info(); \
1670- unsigned int domain = thread->cpu_domain; \
1671- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1672- thread->cpu_domain = domain | domain_val(dom, type); \
1673- set_domain(thread->cpu_domain); \
1674- } while (0)
1675-
1676+extern void modify_domain(unsigned int dom, unsigned int type);
1677 #else
1678 static inline void set_domain(unsigned val) { }
1679 static inline void modify_domain(unsigned dom, unsigned type) { }
1680diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1681index 674d03f..9a0bac0 100644
1682--- a/arch/arm/include/asm/elf.h
1683+++ b/arch/arm/include/asm/elf.h
1684@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1685 the loader. We need to make sure that it is out of the way of the program
1686 that it will "exec", and that there is sufficient room for the brk. */
1687
1688-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1689+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1690+
1691+#ifdef CONFIG_PAX_ASLR
1692+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1693+
1694+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1695+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1696+#endif
1697
1698 /* When the program starts, a1 contains a pointer to a function to be
1699 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1700@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1701 extern void elf_set_personality(const struct elf32_hdr *);
1702 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1703
1704-struct mm_struct;
1705-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1706-#define arch_randomize_brk arch_randomize_brk
1707-
1708 #ifdef CONFIG_MMU
1709 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1710 struct linux_binprm;
1711diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1712index de53547..52b9a28 100644
1713--- a/arch/arm/include/asm/fncpy.h
1714+++ b/arch/arm/include/asm/fncpy.h
1715@@ -81,7 +81,9 @@
1716 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1717 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1718 \
1719+ pax_open_kernel(); \
1720 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1721+ pax_close_kernel(); \
1722 flush_icache_range((unsigned long)(dest_buf), \
1723 (unsigned long)(dest_buf) + (size)); \
1724 \
1725diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1726index 53e69da..3fdc896 100644
1727--- a/arch/arm/include/asm/futex.h
1728+++ b/arch/arm/include/asm/futex.h
1729@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1730 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1731 return -EFAULT;
1732
1733+ pax_open_userland();
1734+
1735 smp_mb();
1736 /* Prefetching cannot fault */
1737 prefetchw(uaddr);
1738@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1739 : "cc", "memory");
1740 smp_mb();
1741
1742+ pax_close_userland();
1743+
1744 *uval = val;
1745 return ret;
1746 }
1747@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1748 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1749 return -EFAULT;
1750
1751+ pax_open_userland();
1752+
1753 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1754 "1: " TUSER(ldr) " %1, [%4]\n"
1755 " teq %1, %2\n"
1756@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1757 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1758 : "cc", "memory");
1759
1760+ pax_close_userland();
1761+
1762 *uval = val;
1763 return ret;
1764 }
1765@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1766 return -EFAULT;
1767
1768 pagefault_disable(); /* implies preempt_disable() */
1769+ pax_open_userland();
1770
1771 switch (op) {
1772 case FUTEX_OP_SET:
1773@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1774 ret = -ENOSYS;
1775 }
1776
1777+ pax_close_userland();
1778 pagefault_enable(); /* subsumes preempt_enable() */
1779
1780 if (!ret) {
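
Every futex primitive above gains the same bracketing: pax_open_userland() before the LDRT/STRT sequences, pax_close_userland() after. Reduced to its shape (stub bodies, illustrative only), the pattern is:

    /* Stubs; under UDEREF these flip DOMAIN_USER between client and no-access. */
    static void pax_open_userland(void)  { }
    static void pax_close_userland(void) { }

    static int futex_probe(const int *uaddr, int oldval)
    {
        int ret;
        pax_open_userland();      /* grant the kernel access to userland */
        ret = (*uaddr == oldval); /* stands in for the LDRT/STRT sequences */
        pax_close_userland();     /* and revoke it again */
        return ret;
    }

    int main(void)
    {
        int v = 42;
        return futex_probe(&v, 42) ? 0 : 1;
    }
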
1781diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1782index 83eb2f7..ed77159 100644
1783--- a/arch/arm/include/asm/kmap_types.h
1784+++ b/arch/arm/include/asm/kmap_types.h
1785@@ -4,6 +4,6 @@
1786 /*
1787 * This is the "bare minimum". AIO seems to require this.
1788 */
1789-#define KM_TYPE_NR 16
1790+#define KM_TYPE_NR 17
1791
1792 #endif
1793diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1794index 9e614a1..3302cca 100644
1795--- a/arch/arm/include/asm/mach/dma.h
1796+++ b/arch/arm/include/asm/mach/dma.h
1797@@ -22,7 +22,7 @@ struct dma_ops {
1798 int (*residue)(unsigned int, dma_t *); /* optional */
1799 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1800 const char *type;
1801-};
1802+} __do_const;
1803
1804 struct dma_struct {
1805 void *addr; /* single DMA address */
1806diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1807index f98c7f3..e5c626d 100644
1808--- a/arch/arm/include/asm/mach/map.h
1809+++ b/arch/arm/include/asm/mach/map.h
1810@@ -23,17 +23,19 @@ struct map_desc {
1811
1812 /* types 0-3 are defined in asm/io.h */
1813 enum {
1814- MT_UNCACHED = 4,
1815- MT_CACHECLEAN,
1816- MT_MINICLEAN,
1817+ MT_UNCACHED_RW = 4,
1818+ MT_CACHECLEAN_RO,
1819+ MT_MINICLEAN_RO,
1820 MT_LOW_VECTORS,
1821 MT_HIGH_VECTORS,
1822- MT_MEMORY_RWX,
1823+ __MT_MEMORY_RWX,
1824 MT_MEMORY_RW,
1825- MT_ROM,
1826- MT_MEMORY_RWX_NONCACHED,
1827+ MT_MEMORY_RX,
1828+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 563b92f..689d58e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -39,7 +39,7 @@ struct outer_cache_fns {
1842 /* This is an ARM L2C thing */
1843 void (*write_sec)(unsigned long, unsigned);
1844 void (*configure)(const struct l2x0_regs *);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
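
This and the neighbouring dma.h hunk show the two sides of grsecurity's constification: ops tables full of function pointers are forced read-only (__do_const), and the few that are legitimately written at runtime, such as outer_cache here, are exempted (__no_const). Both markers are gcc-plugin attributes; what they buy can be modelled in plain C:

    #include <stdio.h>

    struct ops { void (*handler)(void); };

    static void a_handler(void) { puts("called"); }

    /* Constified: lands in .rodata, pointers cannot be retargeted at runtime. */
    static const struct ops ro_ops = { .handler = a_handler };
    /* __no_const equivalent: stays writable for boot-time assignment. */
    static struct ops rw_ops;

    int main(void)
    {
        rw_ops.handler = a_handler;   /* legitimate late binding */
        ro_ops.handler();
        rw_ops.handler();
        return 0;
    }
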
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
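
__section_update() patches protection bits into a live section mapping. On the classic two-level layout a Linux pmd covers 2MB backed by a pair of 1MB hardware sections, so bit 20 of the address (SECTION_SIZE) picks which half to touch; under LPAE there is a single 2MB entry. A userspace model of that selection (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTION_SIZE (1UL << 20)   /* 1MB hardware section */
    typedef uint32_t pmdval_t;

    static void section_update(pmdval_t pmdp[2], unsigned long addr, pmdval_t prot)
    {
        /* even megabyte -> pmdp[0], odd megabyte -> pmdp[1] */
        pmdp[(addr & SECTION_SIZE) ? 1 : 0] |= prot;
        /* the real helper then calls flush_pmd_entry(pmdp) */
    }

    int main(void)
    {
        pmdval_t pair[2] = { 0x11, 0x11 };
        section_update(pair, 0x00100000UL, 0x200);
        printf("%#x %#x\n", pair[0], pair[1]);   /* -> 0x11 0x211 */
        return 0;
    }
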
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index bfd662e..f6cbb02 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -127,6 +127,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a745a2a..481350a 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -80,6 +80,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -91,10 +92,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
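
The new bit is PXN, Privileged eXecute Never: a page can stay executable for userland while the kernel itself may never execute it, removing user-controlled code as a kernel code-reuse target. A quick standalone check that the _HIGH constants above (used from assembly on the upper word of an LPAE descriptor) line up with the 64-bit bit positions:

    #include <assert.h>
    #include <stdint.h>

    #define L_PTE_PXN       ((uint64_t)1 << 53)
    #define L_PTE_XN        ((uint64_t)1 << 54)
    #define L_PTE_PXN_HIGH  (1u << (53 - 32))
    #define L_PTE_XN_HIGH   (1u << (54 - 32))

    int main(void)
    {
        assert(((uint64_t)L_PTE_PXN_HIGH << 32) == L_PTE_PXN);
        assert(((uint64_t)L_PTE_XN_HIGH  << 32) == L_PTE_XN);
        return 0;
    }
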
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index f403541..b10df68 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be an 8MB "hole" after the

2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
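
pax_open_kernel()/pax_close_kernel() implement the write window used throughout this patch (the fncpy.h and tls.h hunks already call it): flip DOMAIN_KERNEL to the manager type so permission bits are ignored, perform the privileged store, flip back, all with preemption disabled. The call pattern, with stub bodies standing in for the real domain switch:

    /* Stubs; the real bodies switch DOMAIN_KERNEL in the DACR as above. */
    static unsigned long pax_open_kernel(void)  { return 0; }
    static unsigned long pax_close_kernel(void) { return 0; }

    static int protected_value;   /* stands in for a __read_only object */

    static void update_protected(int v)
    {
        pax_open_kernel();    /* DOMAIN_KERNEL -> DOMAIN_KERNEXEC (manager) */
        protected_value = v;  /* the one privileged store */
        pax_close_kernel();   /* back to client; preemption re-enabled */
    }

    int main(void)
    {
        update_protected(1);
        return protected_value != 1;
    }
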
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index 72812a1..335f4f3 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -77,9 +77,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 }
2125
2126 #define init_thread_info (init_thread_union.thread_info)
2127@@ -155,7 +155,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* within 8 bits of TIF_SYSCALL_TRACE
2133+ * to meet flexible second operand requirements
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -169,10 +173,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
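
The comment about "flexible second operand requirements" refers to ARM data-processing immediates: an 8-bit value rotated right by an even amount. _TIF_SYSCALL_WORK is tested with TST from assembly, so all of its bits must fit one such window; keeping TIF_GRSEC_SETXID at bit 12, within 8 bits of TIF_SYSCALL_TRACE (bit 8, judging by the neighbouring definitions), preserves that. A small standalone checker for the property:

    #include <stdint.h>
    #include <stdio.h>

    /* An immediate encodes iff some even left-rotation brings it under 0x100. */
    static int encodable(uint32_t imm)
    {
        for (int rot = 0; rot < 32; rot += 2) {
            uint32_t v = rot ? ((imm << rot) | (imm >> (32 - rot))) : imm;
            if (v <= 0xffu)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", encodable(0x1f00u));                /* TIF bits 8..12: yes */
        printf("%d\n", encodable((1u << 5) | (1u << 14))); /* 10 bits apart: no */
        return 0;
    }
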
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index ce0786e..a80c264 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a, b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x, p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x, p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check((x), (p)); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x, p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x, p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check((x), (p)); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2260 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x, ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x), (ptr), __gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x, ptr, err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x), (ptr), err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x, ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x), (ptr), __pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x, ptr, err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x), (ptr), err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
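
The uaccess rework follows one template: the assembly entry points are renamed with a triple underscore (the lib/ and armksyms.c hunks below follow suit), and inline wrappers are interposed that add the UDEREF bracket, a check_object_size() heap-bounds check from PAX_USERCOPY, and a guard against sizes that went negative in a signed computation. A standalone model of the shape, with memcpy standing in for the real copy loop:

    #include <stddef.h>
    #include <string.h>

    static void pax_open_userland(void)  { }
    static void pax_close_userland(void) { }
    static void check_object_size(const void *p, size_t n, int to_user)
    { (void)p; (void)n; (void)to_user; /* PAX_USERCOPY bounds check */ }

    /* Stand-in for the renamed assembly routine; returns bytes NOT copied. */
    static size_t ___copy_from_user(void *to, const void *from, size_t n)
    { memcpy(to, from, n); return 0; }

    static size_t copy_from_user_model(void *to, const void *from, size_t n)
    {
        if ((long)n < 0)      /* e.g. n == (size_t)-1 from a length underflow */
            return n;         /* refuse; report everything uncopied */
        check_object_size(to, n, 0);
        pax_open_userland();
        n = ___copy_from_user(to, from, n);
        pax_close_userland();
        return n;
    }

    int main(void)
    {
        char dst[3], src[3] = "hi";
        return (int)copy_from_user_model(dst, src, sizeof dst);
    }
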
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
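
This one-liner is a plain mask fix: the CPSR instruction-set state bits are J (bit 24) and T (bit 5), and the old constant 0x01000010 used bit 4 instead of the T bit. The arithmetic:

    #include <assert.h>

    int main(void)
    {
        unsigned J = 1u << 24, T = 1u << 5;     /* CPSR J and T bits */
        assert((J | T) == 0x01000020u);         /* the corrected PSR_ISET_MASK */
        assert((J | (1u << 4)) == 0x01000010u); /* old constant: bit 4, not T */
        return 0;
    }
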
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 672b219..4aa120a 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -48,6 +48,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -90,11 +171,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -479,7 +576,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -513,11 +612,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -767,7 +871,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -776,7 +880,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
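
All of the DACR macros above locate thread_info the same way: the two BICs implement sp & ~0x1fff, since thread_info sits at the base of the 8K kernel stack and 0x1fff cannot be encoded as one ARM immediate (the in-line comments say "8K pages", but it is the 8K THREAD_SIZE that matters). pax_enter_kernel also parks the old DACR on the stack, which is why svc_entry's r2 computation grows by 8. A standalone check of the masking:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        assert((0x1fc0 | 0x3f) == 0x1fff);      /* THREAD_SIZE(8K) - 1 */
        uintptr_t sp = 0xC0A03F74u;             /* some in-stack address */
        uintptr_t ti = sp & ~(uintptr_t)0x1fff; /* thread_info at stack base */
        assert(ti == 0xC0A02000u);
        return 0;
    }
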
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+	 * do this here to avoid the performance hit of wrapping the code above,
2663+	 * which directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 0196327..50ac8895 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -444,7 +444,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index 2e11961..07f0704 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
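
With KERNEXEC, module memory is split by protection: module_alloc() now returns non-executable PAGE_KERNEL memory for data, and the new module_alloc_exec()/module_memfree_exec() pair serves the code, so no module mapping is ever writable and executable at once. The added !size / PAGE_ALIGN guard also rejects degenerate sizes before hitting vmalloc. A sketch of the split, with malloc standing in for the range allocator and an assumed area size:

    #include <stddef.h>
    #include <stdlib.h>

    #define MODULES_SPAN (16UL << 20)   /* assumed size of the module area */

    enum prot { PROT_RW_NX, PROT_RX };

    static void *module_alloc_prot(size_t size, enum prot p)
    {
        if (size == 0 || size > MODULES_SPAN)  /* mirrors the added guard */
            return NULL;
        (void)p;  /* the kernel picks PAGE_KERNEL vs PAGE_KERNEL_EXEC here */
        return malloc(size);
    }

    int main(void)
    {
        void *data = module_alloc_prot(128, PROT_RW_NX); /* module_alloc() */
        void *code = module_alloc_prot(128, PROT_RX);    /* module_alloc_exec() */
        free(data);
        free(code);
        return 0;
    }
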
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 69bda1a..755113a 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index 2bf1a16..d959d40 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -213,6 +213,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -226,7 +227,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -252,8 +253,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -430,12 +431,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -451,7 +446,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -480,81 +475,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
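
The hunk drops the per-process "[sigpage]" mapping (get_signal_page() goes away in the signal.c hunk below) along with arch_randomize_brk(), which PaX replaces with its own brk handling. For reference, the placement helper being deleted chose a random page slot between the stack top and TASK_SIZE; its arithmetic, rendered standalone with illustrative values:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long sigpage_addr(unsigned long start_stack,
                                      unsigned long task_size, unsigned npages)
    {
        unsigned long first = PAGE_ALIGN(start_stack);
        unsigned long last  = task_size - ((unsigned long)npages << PAGE_SHIFT);
        if (first > last)
            return 0;                     /* no room after the stack */
        if (first == last)
            return first;                 /* just enough room */
        unsigned long slots = ((last - first) >> PAGE_SHIFT) + 1;
        return first + (((unsigned long)rand() % slots) << PAGE_SHIFT);
    }

    int main(void)
    {
        printf("%#lx\n", sigpage_addr(0xbef00000UL, 0xbf000000UL, 1));
        return 0;
    }
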
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f90fdf4..24e8c84 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -26,7 +26,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index 1d60beb..4aa25d5 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
3058 * Register 0 and check for VMSAv7 or PMSAv7 */
3059 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
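
PXN support is probed at boot from ID_MMFR0: its VMSA field (bits [3:0]) advertises PXN at the values 4 (VMSAv7 with PXN) and 5 (VMSAv7 with the long-descriptor format), and only then are L_PTE_PXN and PMD_PXNTABLE allowed through the new __supported_pte_mask/__supported_pmd_mask filters that pte_modify() consults. The test, standalone:

    #include <stdio.h>

    /* ID_MMFR0[3:0]: 4 = VMSAv7 w/ PXN, 5 = VMSAv7 w/ long descriptors. */
    static int vmsa_has_pxn(unsigned mmfr0)
    {
        unsigned vmsa = mmfr0 & 0xf;
        return vmsa == 4 || vmsa == 5;
    }

    int main(void)
    {
        printf("%d %d\n", vmsa_has_pxn(0x5), vmsa_has_pxn(0x3));   /* 1 0 */
        return 0;
    }
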
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 023ac90..0a69950 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index b652af5..60231ab 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -358,7 +358,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -391,7 +391,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -408,7 +408,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1087,7 +1087,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
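
PAX_REFCOUNT instruments the ordinary atomic types to trap on overflow, since most of them guard object lifetimes. Counters that may legitimately wrap, like the VMID generation here, move to *_unchecked variants that skip the instrumentation. A miniature model of the distinction (the real types and overflow trap come from the PaX core, not shown in this excerpt):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { int64_t v; } atomic64_unchecked_t;

    /* Unchecked flavour: free to wrap, used for generation counters. */
    static int64_t atomic64_inc_unchecked(atomic64_unchecked_t *a)
    {
        return ++a->v;
    }

    /* Checked flavour: the assert stands in for PaX's overflow trap. */
    static int64_t atomic64_inc_checked(int64_t *a)
    {
        assert(*a < INT64_MAX);
        return ++*a;
    }

    int main(void)
    {
        atomic64_unchecked_t gen = { 1 };
        atomic64_inc_unchecked(&gen);
        int64_t refs = 1;
        atomic64_inc_checked(&refs);
        return 0;
    }
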
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 	/* See rationale for this in ___copy_to_user() above. */
3436 if (n < 64)
3437diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3438index 318d127..9aab0d1 100644
3439--- a/arch/arm/mach-exynos/suspend.c
3440+++ b/arch/arm/mach-exynos/suspend.c
3441@@ -18,6 +18,7 @@
3442 #include <linux/syscore_ops.h>
3443 #include <linux/cpu_pm.h>
3444 #include <linux/io.h>
3445+#include <linux/irq.h>
3446 #include <linux/irqchip/arm-gic.h>
3447 #include <linux/err.h>
3448 #include <linux/regulator/machine.h>
3449@@ -632,8 +633,10 @@ void __init exynos_pm_init(void)
3450 tmp |= pm_data->wake_disable_mask;
3451 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3452
3453- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3454- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3455+ pax_open_kernel();
3456+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3457+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3458+ pax_close_kernel();
3459
3460 register_syscore_ops(&exynos_pm_syscore_ops);
3461 suspend_set_ops(&exynos_suspend_ops);
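This open/close sequence is the patch's standard idiom for ops structures that have been made read-only (via __read_only or the constify plugin): the one legitimate runtime assignment must lift write protection first, and the *(void **)& cast launders the store past the compiler's const checking. As a conceptual sketch, the x86 flavour of the primitives toggles CR0.WP; the ARM build switches a DACR domain instead (both details here are assumptions, not this patch's exact ARM code):

	/* conceptual sketch of the write-protection toggle */
	static inline void pax_open_kernel(void)
	{
		preempt_disable();			/* WP state is per-CPU */
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow stores to RO pages */
		barrier();
	}

	static inline void pax_close_kernel(void)
	{
		barrier();
		write_cr0(read_cr0() | X86_CR0_WP);	/* re-protect */
		preempt_enable();
	}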
3462diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3463index 0662087..004d163 100644
3464--- a/arch/arm/mach-keystone/keystone.c
3465+++ b/arch/arm/mach-keystone/keystone.c
3466@@ -27,7 +27,7 @@
3467
3468 #include "keystone.h"
3469
3470-static struct notifier_block platform_nb;
3471+static notifier_block_no_const platform_nb;
3472 static unsigned long keystone_dma_pfn_offset __read_mostly;
3473
3474 static int keystone_platform_notifier(struct notifier_block *nb,
3475diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3476index e46e9ea..9141c83 100644
3477--- a/arch/arm/mach-mvebu/coherency.c
3478+++ b/arch/arm/mach-mvebu/coherency.c
3479@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3480
3481 /*
3482 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3483- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3484+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3485 * is needed as a workaround for a deadlock issue between the PCIe
3486 * interface and the cache controller.
3487 */
3488@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3489 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3490
3491 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3492- mtype = MT_UNCACHED;
3493+ mtype = MT_UNCACHED_RW;
3494
3495 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3496 }
3497diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3498index b6443a4..20a0b74 100644
3499--- a/arch/arm/mach-omap2/board-n8x0.c
3500+++ b/arch/arm/mach-omap2/board-n8x0.c
3501@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3502 }
3503 #endif
3504
3505-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3506+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3507 .late_init = n8x0_menelaus_late_init,
3508 };
3509
3510diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3511index 79f49d9..70bf184 100644
3512--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3513+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3514@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3515 void (*resume)(void);
3516 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3517 void (*hotplug_restart)(void);
3518-};
3519+} __no_const;
3520
3521 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3522 static struct powerdomain *mpuss_pd;
3523@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3524 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3525 {}
3526
3527-struct cpu_pm_ops omap_pm_ops = {
3528+static struct cpu_pm_ops omap_pm_ops __read_only = {
3529 .finish_suspend = default_finish_suspend,
3530 .resume = dummy_cpu_resume,
3531 .scu_prepare = dummy_scu_prepare,
3532diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3533index 5305ec7..6d74045 100644
3534--- a/arch/arm/mach-omap2/omap-smp.c
3535+++ b/arch/arm/mach-omap2/omap-smp.c
3536@@ -19,6 +19,7 @@
3537 #include <linux/device.h>
3538 #include <linux/smp.h>
3539 #include <linux/io.h>
3540+#include <linux/irq.h>
3541 #include <linux/irqchip/arm-gic.h>
3542
3543 #include <asm/smp_scu.h>
3544diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3545index f961c46..4a453dc 100644
3546--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3547+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3548@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3549 return NOTIFY_OK;
3550 }
3551
3552-static struct notifier_block __refdata irq_hotplug_notifier = {
3553+static struct notifier_block irq_hotplug_notifier = {
3554 .notifier_call = irq_cpu_hotplug_notify,
3555 };
3556
3557diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3558index be9541e..821805f 100644
3559--- a/arch/arm/mach-omap2/omap_device.c
3560+++ b/arch/arm/mach-omap2/omap_device.c
3561@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3562 struct platform_device __init *omap_device_build(const char *pdev_name,
3563 int pdev_id,
3564 struct omap_hwmod *oh,
3565- void *pdata, int pdata_len)
3566+ const void *pdata, int pdata_len)
3567 {
3568 struct omap_hwmod *ohs[] = { oh };
3569
3570@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3571 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3572 int pdev_id,
3573 struct omap_hwmod **ohs,
3574- int oh_cnt, void *pdata,
3575+ int oh_cnt, const void *pdata,
3576 int pdata_len)
3577 {
3578 int ret = -ENOMEM;
3579diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3580index 78c02b3..c94109a 100644
3581--- a/arch/arm/mach-omap2/omap_device.h
3582+++ b/arch/arm/mach-omap2/omap_device.h
3583@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3584 /* Core code interface */
3585
3586 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3587- struct omap_hwmod *oh, void *pdata,
3588+ struct omap_hwmod *oh, const void *pdata,
3589 int pdata_len);
3590
3591 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3592 struct omap_hwmod **oh, int oh_cnt,
3593- void *pdata, int pdata_len);
3594+ const void *pdata, int pdata_len);
3595
3596 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3597 struct omap_hwmod **ohs, int oh_cnt);
3598diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3599index 355b089..2c9d7c3 100644
3600--- a/arch/arm/mach-omap2/omap_hwmod.c
3601+++ b/arch/arm/mach-omap2/omap_hwmod.c
3602@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3603 int (*init_clkdm)(struct omap_hwmod *oh);
3604 void (*update_context_lost)(struct omap_hwmod *oh);
3605 int (*get_context_lost)(struct omap_hwmod *oh);
3606-};
3607+} __no_const;
3608
3609 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3610-static struct omap_hwmod_soc_ops soc_ops;
3611+static struct omap_hwmod_soc_ops soc_ops __read_only;
3612
3613 /* omap_hwmod_list contains all registered struct omap_hwmods */
3614 static LIST_HEAD(omap_hwmod_list);
3615diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3616index 95fee54..cfa9cf1 100644
3617--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3618+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3619@@ -10,6 +10,7 @@
3620
3621 #include <linux/kernel.h>
3622 #include <linux/init.h>
3623+#include <asm/pgtable.h>
3624
3625 #include "powerdomain.h"
3626
3627@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3628
3629 void __init am43xx_powerdomains_init(void)
3630 {
3631- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3632+ pax_open_kernel();
3633+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3634+ pax_close_kernel();
3635 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3636 pwrdm_register_pwrdms(powerdomains_am43xx);
3637 pwrdm_complete_init();
3638diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3639index ff0a68c..b312aa0 100644
3640--- a/arch/arm/mach-omap2/wd_timer.c
3641+++ b/arch/arm/mach-omap2/wd_timer.c
3642@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3643 struct omap_hwmod *oh;
3644 char *oh_name = "wd_timer2";
3645 char *dev_name = "omap_wdt";
3646- struct omap_wd_timer_platform_data pdata;
3647+ static struct omap_wd_timer_platform_data pdata = {
3648+ .read_reset_sources = prm_read_reset_sources
3649+ };
3650
3651 if (!cpu_class_is_omap2() || of_have_populated_dt())
3652 return 0;
3653@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3654 return -EINVAL;
3655 }
3656
3657- pdata.read_reset_sources = prm_read_reset_sources;
3658-
3659 pdev = omap_device_build(dev_name, id, oh, &pdata,
3660 sizeof(struct omap_wd_timer_platform_data));
3661 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3662diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3663index 4f25a7c..a81be85 100644
3664--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3665+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3666@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3667 bool entered_lp2 = false;
3668
3669 if (tegra_pending_sgi())
3670- ACCESS_ONCE(abort_flag) = true;
3671+ ACCESS_ONCE_RW(abort_flag) = true;
3672
3673 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3674
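ACCESS_ONCE_RW exists because PaX redefines ACCESS_ONCE to read through a const-qualified pointer, turning accidental writes through it into compile errors; sites that genuinely need to store, like the abort flag here, are converted explicitly. A sketch of the pair of definitions this hunk assumes (cf. include/linux/compiler.h in the full patch):

	/* assumed PaX definitions */
	#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* read-only view */
	#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* writable view */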
3675diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3676index ab95f53..4b977a7 100644
3677--- a/arch/arm/mach-tegra/irq.c
3678+++ b/arch/arm/mach-tegra/irq.c
3679@@ -20,6 +20,7 @@
3680 #include <linux/cpu_pm.h>
3681 #include <linux/interrupt.h>
3682 #include <linux/io.h>
3683+#include <linux/irq.h>
3684 #include <linux/irqchip/arm-gic.h>
3685 #include <linux/irq.h>
3686 #include <linux/kernel.h>
3687diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3688index 2cb587b..6ddfebf 100644
3689--- a/arch/arm/mach-ux500/pm.c
3690+++ b/arch/arm/mach-ux500/pm.c
3691@@ -10,6 +10,7 @@
3692 */
3693
3694 #include <linux/kernel.h>
3695+#include <linux/irq.h>
3696 #include <linux/irqchip/arm-gic.h>
3697 #include <linux/delay.h>
3698 #include <linux/io.h>
3699diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3700index 2dea8b5..6499da2 100644
3701--- a/arch/arm/mach-ux500/setup.h
3702+++ b/arch/arm/mach-ux500/setup.h
3703@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3704 .type = MT_DEVICE, \
3705 }
3706
3707-#define __MEM_DEV_DESC(x, sz) { \
3708- .virtual = IO_ADDRESS(x), \
3709- .pfn = __phys_to_pfn(x), \
3710- .length = sz, \
3711- .type = MT_MEMORY_RWX, \
3712-}
3713-
3714 extern struct smp_operations ux500_smp_ops;
3715 extern void ux500_cpu_die(unsigned int cpu);
3716
3717diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3718index 52d768f..5f93180 100644
3719--- a/arch/arm/mach-zynq/platsmp.c
3720+++ b/arch/arm/mach-zynq/platsmp.c
3721@@ -24,6 +24,7 @@
3722 #include <linux/io.h>
3723 #include <asm/cacheflush.h>
3724 #include <asm/smp_scu.h>
3725+#include <linux/irq.h>
3726 #include <linux/irqchip/arm-gic.h>
3727 #include "common.h"
3728
3729diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3730index 9b4f29e..bbf3bfa 100644
3731--- a/arch/arm/mm/Kconfig
3732+++ b/arch/arm/mm/Kconfig
3733@@ -446,6 +446,7 @@ config CPU_32v5
3734
3735 config CPU_32v6
3736 bool
3737+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3738 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3739
3740 config CPU_32v6K
3741@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3742
3743 config CPU_USE_DOMAINS
3744 bool
3745+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3746 help
3747 This option enables or disables the use of domain switching
3748 via the set_fs() function.
3749@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3750
3751 config KUSER_HELPERS
3752 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3753- depends on MMU
3754+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3755 default y
3756 help
3757 Warning: disabling this option may break user programs.
3758@@ -812,7 +814,7 @@ config KUSER_HELPERS
3759 See Documentation/arm/kernel_user_helpers.txt for details.
3760
3761 However, the fixed address nature of these helpers can be used
3762- by ROP (return orientated programming) authors when creating
3763+ by ROP (Return Oriented Programming) authors when creating
3764 exploits.
3765
3766 If all of the binaries and libraries which run on your platform
3767diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3768index 2c0c541..4585df9 100644
3769--- a/arch/arm/mm/alignment.c
3770+++ b/arch/arm/mm/alignment.c
3771@@ -216,10 +216,12 @@ union offset_union {
3772 #define __get16_unaligned_check(ins,val,addr) \
3773 do { \
3774 unsigned int err = 0, v, a = addr; \
3775+ pax_open_userland(); \
3776 __get8_unaligned_check(ins,v,a,err); \
3777 val = v << ((BE) ? 8 : 0); \
3778 __get8_unaligned_check(ins,v,a,err); \
3779 val |= v << ((BE) ? 0 : 8); \
3780+ pax_close_userland(); \
3781 if (err) \
3782 goto fault; \
3783 } while (0)
3784@@ -233,6 +235,7 @@ union offset_union {
3785 #define __get32_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 24 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792@@ -241,6 +244,7 @@ union offset_union {
3793 val |= v << ((BE) ? 8 : 16); \
3794 __get8_unaligned_check(ins,v,a,err); \
3795 val |= v << ((BE) ? 0 : 24); \
3796+ pax_close_userland(); \
3797 if (err) \
3798 goto fault; \
3799 } while (0)
3800@@ -254,6 +258,7 @@ union offset_union {
3801 #define __put16_unaligned_check(ins,val,addr) \
3802 do { \
3803 unsigned int err = 0, v = val, a = addr; \
3804+ pax_open_userland(); \
3805 __asm__( FIRST_BYTE_16 \
3806 ARM( "1: "ins" %1, [%2], #1\n" ) \
3807 THUMB( "1: "ins" %1, [%2]\n" ) \
3808@@ -273,6 +278,7 @@ union offset_union {
3809 " .popsection\n" \
3810 : "=r" (err), "=&r" (v), "=&r" (a) \
3811 : "0" (err), "1" (v), "2" (a)); \
3812+ pax_close_userland(); \
3813 if (err) \
3814 goto fault; \
3815 } while (0)
3816@@ -286,6 +292,7 @@ union offset_union {
3817 #define __put32_unaligned_check(ins,val,addr) \
3818 do { \
3819 unsigned int err = 0, v = val, a = addr; \
3820+ pax_open_userland(); \
3821 __asm__( FIRST_BYTE_32 \
3822 ARM( "1: "ins" %1, [%2], #1\n" ) \
3823 THUMB( "1: "ins" %1, [%2]\n" ) \
3824@@ -315,6 +322,7 @@ union offset_union {
3825 " .popsection\n" \
3826 : "=r" (err), "=&r" (v), "=&r" (a) \
3827 : "0" (err), "1" (v), "2" (a)); \
3828+ pax_close_userland(); \
3829 if (err) \
3830 goto fault; \
3831 } while (0)
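Under PAX_MEMORY_UDEREF the kernel normally runs with the userland DACR domain set to no-access, so even the alignment fixup path must open an explicit window around its byte-wise user loads and stores. A conceptual sketch of the pair, expressed with the modify_domain() helper added in the arch/arm/mm/mmu.c hunk further down (the exact domain values used by the patch are an assumption; DOMAIN_CLIENT/DOMAIN_NOACCESS are the stock ARM access types from arch/arm/include/asm/domain.h):

	/* conceptual sketch; see modify_domain() in arch/arm/mm/mmu.c below */
	static inline void pax_open_userland(void)
	{
	#ifdef CONFIG_PAX_MEMORY_UDEREF
		modify_domain(DOMAIN_USER, DOMAIN_CLIENT);	/* permit user derefs */
	#endif
	}

	static inline void pax_close_userland(void)
	{
	#ifdef CONFIG_PAX_MEMORY_UDEREF
		modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);	/* fault on user derefs */
	#endif
	}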
3832diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3833index 8f15f70..d599a2b 100644
3834--- a/arch/arm/mm/cache-l2x0.c
3835+++ b/arch/arm/mm/cache-l2x0.c
3836@@ -43,7 +43,7 @@ struct l2c_init_data {
3837 void (*save)(void __iomem *);
3838 void (*configure)(void __iomem *);
3839 struct outer_cache_fns outer_cache;
3840-};
3841+} __do_const;
3842
3843 #define CACHE_LINE_SIZE 32
3844
3845diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3846index 845769e..4278fd7 100644
3847--- a/arch/arm/mm/context.c
3848+++ b/arch/arm/mm/context.c
3849@@ -43,7 +43,7 @@
3850 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3851
3852 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3853-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3854+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3855 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3856
3857 static DEFINE_PER_CPU(atomic64_t, active_asids);
3858@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3859 {
3860 static u32 cur_idx = 1;
3861 u64 asid = atomic64_read(&mm->context.id);
3862- u64 generation = atomic64_read(&asid_generation);
3863+ u64 generation = atomic64_read_unchecked(&asid_generation);
3864
3865 if (asid != 0) {
3866 /*
3867@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3868 */
3869 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3870 if (asid == NUM_USER_ASIDS) {
3871- generation = atomic64_add_return(ASID_FIRST_VERSION,
3872+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3873 &asid_generation);
3874 flush_context(cpu);
3875 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3876@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3877 cpu_set_reserved_ttbr0();
3878
3879 asid = atomic64_read(&mm->context.id);
3880- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3881+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3882 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3883 goto switch_mm_fastpath;
3884
3885 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3886 /* Check that our ASID belongs to the current generation. */
3887 asid = atomic64_read(&mm->context.id);
3888- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3889+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3890 asid = new_context(mm, cpu);
3891 atomic64_set(&mm->context.id, asid);
3892 }
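asid_generation becomes atomic64_unchecked_t because, with PAX_REFCOUNT enabled, ordinary atomics trap on overflow to catch reference-count bugs; a generation counter that increments for the lifetime of the machine and may legitimately wrap has to opt out via the _unchecked twins. A sketch of the distinction, assuming the type layout the patch uses on other architectures:

	/* same layout, different instrumentation: no overflow trap */
	typedef struct { long counter; } atomic64_unchecked_t;

	static atomic64_unchecked_t asid_generation =
		ATOMIC64_INIT(ASID_FIRST_VERSION);

	static u64 bump_asid_generation(void)
	{
		/* a checked atomic64_t would raise the PAX_REFCOUNT trap
		 * here once the counter wrapped */
		return atomic64_add_return_unchecked(ASID_FIRST_VERSION,
						     &asid_generation);
	}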
3893diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3894index 6333d9c..fd09b46 100644
3895--- a/arch/arm/mm/fault.c
3896+++ b/arch/arm/mm/fault.c
3897@@ -25,6 +25,7 @@
3898 #include <asm/system_misc.h>
3899 #include <asm/system_info.h>
3900 #include <asm/tlbflush.h>
3901+#include <asm/sections.h>
3902
3903 #include "fault.h"
3904
3905@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3906 if (fixup_exception(regs))
3907 return;
3908
3909+#ifdef CONFIG_PAX_MEMORY_UDEREF
3910+ if (addr < TASK_SIZE) {
3911+ if (current->signal->curr_ip)
3912+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3913+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3914+ else
3915+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3916+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3917+ }
3918+#endif
3919+
3920+#ifdef CONFIG_PAX_KERNEXEC
3921+ if ((fsr & FSR_WRITE) &&
3922+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3923+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3924+ {
3925+ if (current->signal->curr_ip)
3926+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3927+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3928+ else
3929+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3930+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3931+ }
3932+#endif
3933+
3934 /*
3935 * No handler, we'll have to terminate things with extreme prejudice.
3936 */
3937@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3938 }
3939 #endif
3940
3941+#ifdef CONFIG_PAX_PAGEEXEC
3942+ if (fsr & FSR_LNX_PF) {
3943+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3944+ do_group_exit(SIGKILL);
3945+ }
3946+#endif
3947+
3948 tsk->thread.address = addr;
3949 tsk->thread.error_code = fsr;
3950 tsk->thread.trap_no = 14;
3951@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3952 }
3953 #endif /* CONFIG_MMU */
3954
3955+#ifdef CONFIG_PAX_PAGEEXEC
3956+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3957+{
3958+ long i;
3959+
3960+ printk(KERN_ERR "PAX: bytes at PC: ");
3961+ for (i = 0; i < 20; i++) {
3962+ unsigned char c;
3963+ if (get_user(c, (__force unsigned char __user *)pc+i))
3964+ printk(KERN_CONT "?? ");
3965+ else
3966+ printk(KERN_CONT "%02x ", c);
3967+ }
3968+ printk("\n");
3969+
3970+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3971+ for (i = -1; i < 20; i++) {
3972+ unsigned long c;
3973+ if (get_user(c, (__force unsigned long __user *)sp+i))
3974+ printk(KERN_CONT "???????? ");
3975+ else
3976+ printk(KERN_CONT "%08lx ", c);
3977+ }
3978+ printk("\n");
3979+}
3980+#endif
3981+
3982 /*
3983 * First Level Translation Fault Handler
3984 *
3985@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3986 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3987 struct siginfo info;
3988
3989+#ifdef CONFIG_PAX_MEMORY_UDEREF
3990+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3991+ if (current->signal->curr_ip)
3992+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3993+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3994+ else
3995+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3996+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3997+ goto die;
3998+ }
3999+#endif
4000+
4001 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4002 return;
4003
4004+die:
4005 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4006 inf->name, fsr, addr);
4007 show_pte(current->mm, addr);
4008@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4009 ifsr_info[nr].name = name;
4010 }
4011
4012+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4013+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4014+
4015 asmlinkage void __exception
4016 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4017 {
4018 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4019 struct siginfo info;
4020+ unsigned long pc = instruction_pointer(regs);
4021+
4022+ if (user_mode(regs)) {
4023+ unsigned long sigpage = current->mm->context.sigpage;
4024+
4025+ if (sigpage <= pc && pc < sigpage + 7*4) {
4026+ if (pc < sigpage + 3*4)
4027+ sys_sigreturn(regs);
4028+ else
4029+ sys_rt_sigreturn(regs);
4030+ return;
4031+ }
4032+ if (pc == 0xffff0f60UL) {
4033+ /*
4034+ * PaX: __kuser_cmpxchg64 emulation
4035+ */
4036+ // TODO
4037+ //regs->ARM_pc = regs->ARM_lr;
4038+ //return;
4039+ }
4040+ if (pc == 0xffff0fa0UL) {
4041+ /*
4042+ * PaX: __kuser_memory_barrier emulation
4043+ */
4044+ // dmb(); implied by the exception
4045+ regs->ARM_pc = regs->ARM_lr;
4046+ return;
4047+ }
4048+ if (pc == 0xffff0fc0UL) {
4049+ /*
4050+ * PaX: __kuser_cmpxchg emulation
4051+ */
4052+ // TODO
4053+ //long new;
4054+ //int op;
4055+
4056+ //op = FUTEX_OP_SET << 28;
4057+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4058+ //regs->ARM_r0 = old != new;
4059+ //regs->ARM_pc = regs->ARM_lr;
4060+ //return;
4061+ }
4062+ if (pc == 0xffff0fe0UL) {
4063+ /*
4064+ * PaX: __kuser_get_tls emulation
4065+ */
4066+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4067+ regs->ARM_pc = regs->ARM_lr;
4068+ return;
4069+ }
4070+ }
4071+
4072+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4073+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4074+ if (current->signal->curr_ip)
4075+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4077+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4078+ else
4079+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4080+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4081+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4082+ goto die;
4083+ }
4084+#endif
4085+
4086+#ifdef CONFIG_PAX_REFCOUNT
4087+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4088+#ifdef CONFIG_THUMB2_KERNEL
4089+ unsigned short bkpt;
4090+
4091+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4092+#else
4093+ unsigned int bkpt;
4094+
4095+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4096+#endif
4097+ current->thread.error_code = ifsr;
4098+ current->thread.trap_no = 0;
4099+ pax_report_refcount_overflow(regs);
4100+ fixup_exception(regs);
4101+ return;
4102+ }
4103+ }
4104+#endif
4105
4106 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4107 return;
4108
4109+die:
4110 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4111 inf->name, ifsr, addr);
4112
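The magic addresses tested above are the fixed kuser-helper entry points documented in Documentation/arm/kernel_user_helpers.txt (0xffff0fe0 get_tls, 0xffff0fc0 cmpxchg, 0xffff0fa0 memory_barrier, 0xffff0f60 cmpxchg64). With the vector page no longer readable from userland, a call to one of them prefetch-aborts and is emulated in this handler instead, so the userland ABI is unchanged. A usage sketch from the userland side:

	/* userland view of the helper emulated at 0xffff0fe0 */
	typedef void *(*kuser_get_tls_t)(void);
	#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

	void *read_tp(void)
	{
		/* under PaX this call faults into do_PrefetchAbort, which
		 * loads tp_value[0] into r0 and returns via lr, as above */
		return __kuser_get_tls();
	}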
4113diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4114index cf08bdf..772656c 100644
4115--- a/arch/arm/mm/fault.h
4116+++ b/arch/arm/mm/fault.h
4117@@ -3,6 +3,7 @@
4118
4119 /*
4120 * Fault status register encodings. We steal bit 31 for our own purposes.
4121+ * Set when the FSR value is from an instruction fault.
4122 */
4123 #define FSR_LNX_PF (1 << 31)
4124 #define FSR_WRITE (1 << 11)
4125@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4126 }
4127 #endif
4128
4129+/* valid for LPAE and !LPAE */
4130+static inline int is_xn_fault(unsigned int fsr)
4131+{
4132+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4133+}
4134+
4135+static inline int is_domain_fault(unsigned int fsr)
4136+{
4137+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4138+}
4139+
4140 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4141 unsigned long search_exception_table(unsigned long addr);
4142
4143diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4144index 1609b02..def0785 100644
4145--- a/arch/arm/mm/init.c
4146+++ b/arch/arm/mm/init.c
4147@@ -755,7 +755,46 @@ void free_tcmmem(void)
4148 {
4149 #ifdef CONFIG_HAVE_TCM
4150 extern char __tcm_start, __tcm_end;
4151+#endif
4152
4153+#ifdef CONFIG_PAX_KERNEXEC
4154+ unsigned long addr;
4155+ pgd_t *pgd;
4156+ pud_t *pud;
4157+ pmd_t *pmd;
4158+ int cpu_arch = cpu_architecture();
4159+ unsigned int cr = get_cr();
4160+
4161+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4162+	/* make page tables, etc. before .text NX */
4163+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4164+ pgd = pgd_offset_k(addr);
4165+ pud = pud_offset(pgd, addr);
4166+ pmd = pmd_offset(pud, addr);
4167+ __section_update(pmd, addr, PMD_SECT_XN);
4168+ }
4169+ /* make init NX */
4170+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4171+ pgd = pgd_offset_k(addr);
4172+ pud = pud_offset(pgd, addr);
4173+ pmd = pmd_offset(pud, addr);
4174+ __section_update(pmd, addr, PMD_SECT_XN);
4175+ }
4176+ /* make kernel code/rodata RX */
4177+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4178+ pgd = pgd_offset_k(addr);
4179+ pud = pud_offset(pgd, addr);
4180+ pmd = pmd_offset(pud, addr);
4181+#ifdef CONFIG_ARM_LPAE
4182+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4183+#else
4184+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4185+#endif
4186+ }
4187+ }
4188+#endif
4189+
4190+#ifdef CONFIG_HAVE_TCM
4191 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4192 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4193 #endif
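__section_update() is relied on by these loops but not shown in this excerpt: the KERNEXEC pass needs a helper that ORs extra protection bits into the 1MiB section descriptor covering addr and writes it back. A plausible sketch (the non-LPAE handling of the two section entries packed into one pmd is an assumption about the helper, not quoted from the patch):

	/* sketch of the helper assumed by the KERNEXEC loops above */
	static inline void __section_update(pmd_t *pmd, unsigned long addr,
					    pmdval_t prot)
	{
	#ifdef CONFIG_ARM_LPAE
		pmd[0] = __pmd(pmd_val(pmd[0]) | prot);
	#else
		if (addr & SECTION_SIZE)	/* odd 1MiB half of the pair */
			pmd[1] = __pmd(pmd_val(pmd[1]) | prot);
		else
			pmd[0] = __pmd(pmd_val(pmd[0]) | prot);
	#endif
		flush_pmd_entry(pmd);		/* write back + TLB maintenance */
	}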
4194diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4195index d1e5ad7..84dcbf2 100644
4196--- a/arch/arm/mm/ioremap.c
4197+++ b/arch/arm/mm/ioremap.c
4198@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4199 unsigned int mtype;
4200
4201 if (cached)
4202- mtype = MT_MEMORY_RWX;
4203+ mtype = MT_MEMORY_RX;
4204 else
4205- mtype = MT_MEMORY_RWX_NONCACHED;
4206+ mtype = MT_MEMORY_RX_NONCACHED;
4207
4208 return __arm_ioremap_caller(phys_addr, size, mtype,
4209 __builtin_return_address(0));
4210diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4211index 5e85ed3..b10a7ed 100644
4212--- a/arch/arm/mm/mmap.c
4213+++ b/arch/arm/mm/mmap.c
4214@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4215 struct vm_area_struct *vma;
4216 int do_align = 0;
4217 int aliasing = cache_is_vipt_aliasing();
4218+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4219 struct vm_unmapped_area_info info;
4220
4221 /*
4222@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4223 if (len > TASK_SIZE)
4224 return -ENOMEM;
4225
4226+#ifdef CONFIG_PAX_RANDMMAP
4227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4228+#endif
4229+
4230 if (addr) {
4231 if (do_align)
4232 addr = COLOUR_ALIGN(addr, pgoff);
4233@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4234 addr = PAGE_ALIGN(addr);
4235
4236 vma = find_vma(mm, addr);
4237- if (TASK_SIZE - len >= addr &&
4238- (!vma || addr + len <= vma->vm_start))
4239+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4240 return addr;
4241 }
4242
4243@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 info.high_limit = TASK_SIZE;
4245 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4246 info.align_offset = pgoff << PAGE_SHIFT;
4247+ info.threadstack_offset = offset;
4248 return vm_unmapped_area(&info);
4249 }
4250
4251@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4252 unsigned long addr = addr0;
4253 int do_align = 0;
4254 int aliasing = cache_is_vipt_aliasing();
4255+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4256 struct vm_unmapped_area_info info;
4257
4258 /*
4259@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4260 return addr;
4261 }
4262
4263+#ifdef CONFIG_PAX_RANDMMAP
4264+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4265+#endif
4266+
4267 /* requesting a specific address */
4268 if (addr) {
4269 if (do_align)
4270@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 else
4272 addr = PAGE_ALIGN(addr);
4273 vma = find_vma(mm, addr);
4274- if (TASK_SIZE - len >= addr &&
4275- (!vma || addr + len <= vma->vm_start))
4276+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4277 return addr;
4278 }
4279
4280@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4281 info.high_limit = mm->mmap_base;
4282 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4283 info.align_offset = pgoff << PAGE_SHIFT;
4284+ info.threadstack_offset = offset;
4285 addr = vm_unmapped_area(&info);
4286
4287 /*
4288@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4289 {
4290 unsigned long random_factor = 0UL;
4291
4292+#ifdef CONFIG_PAX_RANDMMAP
4293+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4294+#endif
4295+
4296 /* 8 bits of randomness in 20 address space bits */
4297 if ((current->flags & PF_RANDOMIZE) &&
4298 !(current->personality & ADDR_NO_RANDOMIZE))
4299@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4300
4301 if (mmap_is_legacy()) {
4302 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4303+
4304+#ifdef CONFIG_PAX_RANDMMAP
4305+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4306+ mm->mmap_base += mm->delta_mmap;
4307+#endif
4308+
4309 mm->get_unmapped_area = arch_get_unmapped_area;
4310 } else {
4311 mm->mmap_base = mmap_base(random_factor);
4312+
4313+#ifdef CONFIG_PAX_RANDMMAP
4314+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4315+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4316+#endif
4317+
4318 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4319 }
4320 }
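check_heap_stack_gap() replaces the bare "does not overlap the next vma" test with one that also keeps clear the randomised per-mapping gap produced by gr_rand_threadstack_offset(). A simplified sketch of the semantics implied here (the full grsecurity helper additionally honours the heap_stack_gap sysctl for VM_GROWSDOWN neighbours):

	/* simplified sketch of the gap check used above */
	static inline bool check_heap_stack_gap(const struct vm_area_struct *vma,
						unsigned long addr,
						unsigned long len,
						unsigned long offset)
	{
		if (!vma)
			return true;	/* nothing mapped above the candidate */
		/* also reserve the randomised thread-stack offset */
		return addr + len + offset <= vma->vm_start;
	}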
4321diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4322index 4e6ef89..21c27f2 100644
4323--- a/arch/arm/mm/mmu.c
4324+++ b/arch/arm/mm/mmu.c
4325@@ -41,6 +41,22 @@
4326 #include "mm.h"
4327 #include "tcm.h"
4328
4329+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4330+void modify_domain(unsigned int dom, unsigned int type)
4331+{
4332+ struct thread_info *thread = current_thread_info();
4333+ unsigned int domain = thread->cpu_domain;
4334+ /*
4335+ * DOMAIN_MANAGER might be defined to some other value,
4336+ * use the arch-defined constant
4337+ */
4338+ domain &= ~domain_val(dom, 3);
4339+ thread->cpu_domain = domain | domain_val(dom, type);
4340+ set_domain(thread->cpu_domain);
4341+}
4342+EXPORT_SYMBOL(modify_domain);
4343+#endif
4344+
4345 /*
4346 * empty_zero_page is a special page that is used for
4347 * zero-initialized data and COW.
4348@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4349 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4350 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4351
4352-static struct mem_type mem_types[] = {
4353+#ifdef CONFIG_PAX_KERNEXEC
4354+#define L_PTE_KERNEXEC L_PTE_RDONLY
4355+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4356+#else
4357+#define L_PTE_KERNEXEC L_PTE_DIRTY
4358+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4359+#endif
4360+
4361+static struct mem_type mem_types[] __read_only = {
4362 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4363 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4364 L_PTE_SHARED,
4365@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4366 .prot_sect = PROT_SECT_DEVICE,
4367 .domain = DOMAIN_IO,
4368 },
4369- [MT_UNCACHED] = {
4370+ [MT_UNCACHED_RW] = {
4371 .prot_pte = PROT_PTE_DEVICE,
4372 .prot_l1 = PMD_TYPE_TABLE,
4373 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4374 .domain = DOMAIN_IO,
4375 },
4376- [MT_CACHECLEAN] = {
4377- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4378+ [MT_CACHECLEAN_RO] = {
4379+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4380 .domain = DOMAIN_KERNEL,
4381 },
4382 #ifndef CONFIG_ARM_LPAE
4383- [MT_MINICLEAN] = {
4384- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4385+ [MT_MINICLEAN_RO] = {
4386+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4387 .domain = DOMAIN_KERNEL,
4388 },
4389 #endif
4390@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4391 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4392 L_PTE_RDONLY,
4393 .prot_l1 = PMD_TYPE_TABLE,
4394- .domain = DOMAIN_USER,
4395+ .domain = DOMAIN_VECTORS,
4396 },
4397 [MT_HIGH_VECTORS] = {
4398 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4399 L_PTE_USER | L_PTE_RDONLY,
4400 .prot_l1 = PMD_TYPE_TABLE,
4401- .domain = DOMAIN_USER,
4402+ .domain = DOMAIN_VECTORS,
4403 },
4404- [MT_MEMORY_RWX] = {
4405+ [__MT_MEMORY_RWX] = {
4406 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4407 .prot_l1 = PMD_TYPE_TABLE,
4408 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4409@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4410 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4411 .domain = DOMAIN_KERNEL,
4412 },
4413- [MT_ROM] = {
4414- .prot_sect = PMD_TYPE_SECT,
4415+ [MT_MEMORY_RX] = {
4416+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4417+ .prot_l1 = PMD_TYPE_TABLE,
4418+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4419+ .domain = DOMAIN_KERNEL,
4420+ },
4421+ [MT_ROM_RX] = {
4422+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4423 .domain = DOMAIN_KERNEL,
4424 },
4425- [MT_MEMORY_RWX_NONCACHED] = {
4426+ [MT_MEMORY_RW_NONCACHED] = {
4427 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4428 L_PTE_MT_BUFFERABLE,
4429 .prot_l1 = PMD_TYPE_TABLE,
4430 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4431 .domain = DOMAIN_KERNEL,
4432 },
4433+ [MT_MEMORY_RX_NONCACHED] = {
4434+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4435+ L_PTE_MT_BUFFERABLE,
4436+ .prot_l1 = PMD_TYPE_TABLE,
4437+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4438+ .domain = DOMAIN_KERNEL,
4439+ },
4440 [MT_MEMORY_RW_DTCM] = {
4441 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4442 L_PTE_XN,
4443@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4444 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4445 .domain = DOMAIN_KERNEL,
4446 },
4447- [MT_MEMORY_RWX_ITCM] = {
4448- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4449+ [MT_MEMORY_RX_ITCM] = {
4450+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4451 .prot_l1 = PMD_TYPE_TABLE,
4452+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4453 .domain = DOMAIN_KERNEL,
4454 },
4455 [MT_MEMORY_RW_SO] = {
4456@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4457 * Mark cache clean areas and XIP ROM read only
4458 * from SVC mode and no access from userspace.
4459 */
4460- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4461- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4462- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4463+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4464+#ifdef CONFIG_PAX_KERNEXEC
4465+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4466+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4467+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4468+#endif
4469+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4470+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471 #endif
4472
4473 /*
4474@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4475 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4476 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4477 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4478- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4479- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4480+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4481+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4482 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4483 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4484+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4485+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4486 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4487- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4488- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4489+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4490+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4493 }
4494 }
4495
4496@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4497 if (cpu_arch >= CPU_ARCH_ARMv6) {
4498 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4499 /* Non-cacheable Normal is XCB = 001 */
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4501+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4502+ PMD_SECT_BUFFERED;
4503+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4504 PMD_SECT_BUFFERED;
4505 } else {
4506 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4507- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4508+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4509+ PMD_SECT_TEX(1);
4510+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4511 PMD_SECT_TEX(1);
4512 }
4513 } else {
4514- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4515+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4517 }
4518
4519 #ifdef CONFIG_ARM_LPAE
4520@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4521 user_pgprot |= PTE_EXT_PXN;
4522 #endif
4523
4524+ user_pgprot |= __supported_pte_mask;
4525+
4526 for (i = 0; i < 16; i++) {
4527 pteval_t v = pgprot_val(protection_map[i]);
4528 protection_map[i] = __pgprot(v | user_pgprot);
4529@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4530
4531 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4532 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4533- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4534- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4535+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4536+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4537 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4538 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4539+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4540+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4541 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4542- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4543- mem_types[MT_ROM].prot_sect |= cp->pmd;
4544+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4545+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4546+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4547
4548 switch (cp->pmd) {
4549 case PMD_SECT_WT:
4550- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4551+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4552 break;
4553 case PMD_SECT_WB:
4554 case PMD_SECT_WBWA:
4555- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4556+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4557 break;
4558 }
4559 pr_info("Memory policy: %sData cache %s\n",
4560@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4561 return;
4562 }
4563
4564- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4565+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4566 md->virtual >= PAGE_OFFSET &&
4567 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4568 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4569@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4570 * called function. This means you can't use any function or debugging
4571 * method which may touch any device, otherwise the kernel _will_ crash.
4572 */
4573+
4574+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4575+
4576 static void __init devicemaps_init(const struct machine_desc *mdesc)
4577 {
4578 struct map_desc map;
4579 unsigned long addr;
4580- void *vectors;
4581
4582- /*
4583- * Allocate the vector page early.
4584- */
4585- vectors = early_alloc(PAGE_SIZE * 2);
4586-
4587- early_trap_init(vectors);
4588+ early_trap_init(&vectors);
4589
4590 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4591 pmd_clear(pmd_off_k(addr));
4592@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4593 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4594 map.virtual = MODULES_VADDR;
4595 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4596- map.type = MT_ROM;
4597+ map.type = MT_ROM_RX;
4598 create_mapping(&map);
4599 #endif
4600
4601@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4602 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4603 map.virtual = FLUSH_BASE;
4604 map.length = SZ_1M;
4605- map.type = MT_CACHECLEAN;
4606+ map.type = MT_CACHECLEAN_RO;
4607 create_mapping(&map);
4608 #endif
4609 #ifdef FLUSH_BASE_MINICACHE
4610 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4611 map.virtual = FLUSH_BASE_MINICACHE;
4612 map.length = SZ_1M;
4613- map.type = MT_MINICLEAN;
4614+ map.type = MT_MINICLEAN_RO;
4615 create_mapping(&map);
4616 #endif
4617
4618@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4619 * location (0xffff0000). If we aren't using high-vectors, also
4620 * create a mapping at the low-vectors virtual address.
4621 */
4622- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4623+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4624 map.virtual = 0xffff0000;
4625 map.length = PAGE_SIZE;
4626 #ifdef CONFIG_KUSER_HELPERS
4627@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4628 static void __init map_lowmem(void)
4629 {
4630 struct memblock_region *reg;
4631+#ifndef CONFIG_PAX_KERNEXEC
4632 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4633 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4634+#endif
4635
4636 /* Map all the lowmem memory banks. */
4637 for_each_memblock(memory, reg) {
4638@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4639 if (start >= end)
4640 break;
4641
4642+#ifdef CONFIG_PAX_KERNEXEC
4643+ map.pfn = __phys_to_pfn(start);
4644+ map.virtual = __phys_to_virt(start);
4645+ map.length = end - start;
4646+
4647+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4648+ struct map_desc kernel;
4649+ struct map_desc initmap;
4650+
4651+ /* when freeing initmem we will make this RW */
4652+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4653+ initmap.virtual = (unsigned long)__init_begin;
4654+ initmap.length = _sdata - __init_begin;
4655+ initmap.type = __MT_MEMORY_RWX;
4656+ create_mapping(&initmap);
4657+
4658+ /* when freeing initmem we will make this RX */
4659+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4660+ kernel.virtual = (unsigned long)_stext;
4661+ kernel.length = __init_begin - _stext;
4662+ kernel.type = __MT_MEMORY_RWX;
4663+ create_mapping(&kernel);
4664+
4665+ if (map.virtual < (unsigned long)_stext) {
4666+ map.length = (unsigned long)_stext - map.virtual;
4667+ map.type = __MT_MEMORY_RWX;
4668+ create_mapping(&map);
4669+ }
4670+
4671+ map.pfn = __phys_to_pfn(__pa(_sdata));
4672+ map.virtual = (unsigned long)_sdata;
4673+ map.length = end - __pa(_sdata);
4674+ }
4675+
4676+ map.type = MT_MEMORY_RW;
4677+ create_mapping(&map);
4678+#else
4679 if (end < kernel_x_start) {
4680 map.pfn = __phys_to_pfn(start);
4681 map.virtual = __phys_to_virt(start);
4682 map.length = end - start;
4683- map.type = MT_MEMORY_RWX;
4684+ map.type = __MT_MEMORY_RWX;
4685
4686 create_mapping(&map);
4687 } else if (start >= kernel_x_end) {
4688@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4689 map.pfn = __phys_to_pfn(kernel_x_start);
4690 map.virtual = __phys_to_virt(kernel_x_start);
4691 map.length = kernel_x_end - kernel_x_start;
4692- map.type = MT_MEMORY_RWX;
4693+ map.type = __MT_MEMORY_RWX;
4694
4695 create_mapping(&map);
4696
4697@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4698 create_mapping(&map);
4699 }
4700 }
4701+#endif
4702 }
4703 }
4704
4705diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4706index f412b53..fc89433 100644
4707--- a/arch/arm/net/bpf_jit_32.c
4708+++ b/arch/arm/net/bpf_jit_32.c
4709@@ -20,6 +20,7 @@
4710 #include <asm/cacheflush.h>
4711 #include <asm/hwcap.h>
4712 #include <asm/opcodes.h>
4713+#include <asm/pgtable.h>
4714
4715 #include "bpf_jit_32.h"
4716
4717@@ -71,7 +72,11 @@ struct jit_ctx {
4718 #endif
4719 };
4720
4721+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4722+int bpf_jit_enable __read_only;
4723+#else
4724 int bpf_jit_enable __read_mostly;
4725+#endif
4726
4727 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4728 {
4729@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4730 {
4731 u32 *ptr;
4732 /* We are guaranteed to have aligned memory. */
4733+ pax_open_kernel();
4734 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4735 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4736+ pax_close_kernel();
4737 }
4738
4739 static void build_prologue(struct jit_ctx *ctx)
4740diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4741index 5b217f4..c23f40e 100644
4742--- a/arch/arm/plat-iop/setup.c
4743+++ b/arch/arm/plat-iop/setup.c
4744@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4745 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4746 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4747 .length = IOP3XX_PERIPHERAL_SIZE,
4748- .type = MT_UNCACHED,
4749+ .type = MT_UNCACHED_RW,
4750 },
4751 };
4752
4753diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4754index a5bc92d..0bb4730 100644
4755--- a/arch/arm/plat-omap/sram.c
4756+++ b/arch/arm/plat-omap/sram.c
4757@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4758 * Looks like we need to preserve some bootloader code at the
4759 * beginning of SRAM for jumping to flash for reboot to work...
4760 */
4761+ pax_open_kernel();
4762 memset_io(omap_sram_base + omap_sram_skip, 0,
4763 omap_sram_size - omap_sram_skip);
4764+ pax_close_kernel();
4765 }
4766diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4767index 7047051..44e8675 100644
4768--- a/arch/arm64/include/asm/atomic.h
4769+++ b/arch/arm64/include/asm/atomic.h
4770@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4771 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4772 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4773
4774+#define atomic64_read_unchecked(v) atomic64_read(v)
4775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4783+
4784 #endif
4785 #endif
4786diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4787index a5abb00..9cbca9a 100644
4788--- a/arch/arm64/include/asm/barrier.h
4789+++ b/arch/arm64/include/asm/barrier.h
4790@@ -44,7 +44,7 @@
4791 do { \
4792 compiletime_assert_atomic_type(*p); \
4793 barrier(); \
4794- ACCESS_ONCE(*p) = (v); \
4795+ ACCESS_ONCE_RW(*p) = (v); \
4796 } while (0)
4797
4798 #define smp_load_acquire(p) \
4799diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4800index 4fde8c1..441f84f 100644
4801--- a/arch/arm64/include/asm/percpu.h
4802+++ b/arch/arm64/include/asm/percpu.h
4803@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4804 {
4805 switch (size) {
4806 case 1:
4807- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4808+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4809 break;
4810 case 2:
4811- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4812+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4813 break;
4814 case 4:
4815- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4816+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4817 break;
4818 case 8:
4819- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4820+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4821 break;
4822 default:
4823 BUILD_BUG();
4824diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4825index e20df38..027ede3 100644
4826--- a/arch/arm64/include/asm/pgalloc.h
4827+++ b/arch/arm64/include/asm/pgalloc.h
4828@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4829 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4830 }
4831
4832+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4833+{
4834+ pud_populate(mm, pud, pmd);
4835+}
4836+
4837 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4838
4839 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4840diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4841index 07e1ba44..ec8cbbb 100644
4842--- a/arch/arm64/include/asm/uaccess.h
4843+++ b/arch/arm64/include/asm/uaccess.h
4844@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4845 flag; \
4846 })
4847
4848+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4849 #define access_ok(type, addr, size) __range_ok(addr, size)
4850 #define user_addr_max get_fs
4851
4852diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4853index b0bd4e5..54e82f6 100644
4854--- a/arch/arm64/mm/dma-mapping.c
4855+++ b/arch/arm64/mm/dma-mapping.c
4856@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4857 phys_to_page(paddr),
4858 size >> PAGE_SHIFT);
4859 if (!freed)
4860- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4861+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4862 }
4863
4864 static void *__dma_alloc(struct device *dev, size_t size,
4865diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4866index c3a58a1..78fbf54 100644
4867--- a/arch/avr32/include/asm/cache.h
4868+++ b/arch/avr32/include/asm/cache.h
4869@@ -1,8 +1,10 @@
4870 #ifndef __ASM_AVR32_CACHE_H
4871 #define __ASM_AVR32_CACHE_H
4872
4873+#include <linux/const.h>
4874+
4875 #define L1_CACHE_SHIFT 5
4876-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4877+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4878
4879 /*
4880 * Memory returned by kmalloc() may be used for DMA, so we must make
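This cache.h change recurs for blackfin, cris, and frv below: replacing the plain integer with _AC(1,UL) << L1_CACHE_SHIFT makes the line-size constant an unsigned long in C, so alignment arithmetic such as (n + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1) is carried out in unsigned arithmetic that the size_overflow plugin can reason about, while the same header stays usable from assembly. The include/linux/const.h machinery it leans on:

	/* from include/linux/const.h: append the suffix in C, drop it
	 * in assembly, where 1UL would not parse */
	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)
	#endif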
4881diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4882index d232888..87c8df1 100644
4883--- a/arch/avr32/include/asm/elf.h
4884+++ b/arch/avr32/include/asm/elf.h
4885@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4886 the loader. We need to make sure that it is out of the way of the program
4887 that it will "exec", and that there is sufficient room for the brk. */
4888
4889-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4890+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4891
4892+#ifdef CONFIG_PAX_ASLR
4893+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4894+
4895+#define PAX_DELTA_MMAP_LEN 15
4896+#define PAX_DELTA_STACK_LEN 15
4897+#endif
4898
4899 /* This yields a mask that user programs can use to figure out what
4900 instruction set this CPU supports. This could be done in user space,
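PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits folded into the mmap and stack bases under PAX_ASLR; with 4KiB pages, 15 bits shifted up by PAGE_SHIFT is roughly 128MiB of jitter. A sketch of how the deltas are consumed, following the pattern grsecurity uses on other architectures (pax_get_random_long() is the patch's entropy helper; the consuming code itself is not in this excerpt):

	/* sketch of the consumer (cf. fs/binfmt_elf.c in the full patch) */
	mm->delta_mmap  = (pax_get_random_long() &
			   ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
	mm->delta_stack = (pax_get_random_long() &
			   ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;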
4901diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4902index 479330b..53717a8 100644
4903--- a/arch/avr32/include/asm/kmap_types.h
4904+++ b/arch/avr32/include/asm/kmap_types.h
4905@@ -2,9 +2,9 @@
4906 #define __ASM_AVR32_KMAP_TYPES_H
4907
4908 #ifdef CONFIG_DEBUG_HIGHMEM
4909-# define KM_TYPE_NR 29
4910+# define KM_TYPE_NR 30
4911 #else
4912-# define KM_TYPE_NR 14
4913+# define KM_TYPE_NR 15
4914 #endif
4915
4916 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4917diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4918index d223a8b..69c5210 100644
4919--- a/arch/avr32/mm/fault.c
4920+++ b/arch/avr32/mm/fault.c
4921@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4922
4923 int exception_trace = 1;
4924
4925+#ifdef CONFIG_PAX_PAGEEXEC
4926+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4927+{
4928+ unsigned long i;
4929+
4930+ printk(KERN_ERR "PAX: bytes at PC: ");
4931+ for (i = 0; i < 20; i++) {
4932+ unsigned char c;
4933+ if (get_user(c, (unsigned char *)pc+i))
4934+			printk(KERN_CONT "?? ");
4935+ else
4936+ printk(KERN_CONT "%02x ", c);
4937+ }
4938+ printk("\n");
4939+}
4940+#endif
4941+
4942 /*
4943 * This routine handles page faults. It determines the address and the
4944 * problem, and then passes it off to one of the appropriate routines.
4945@@ -178,6 +195,16 @@ bad_area:
4946 up_read(&mm->mmap_sem);
4947
4948 if (user_mode(regs)) {
4949+
4950+#ifdef CONFIG_PAX_PAGEEXEC
4951+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4952+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4953+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4954+ do_group_exit(SIGKILL);
4955+ }
4956+ }
4957+#endif
4958+
4959 if (exception_trace && printk_ratelimit())
4960 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4961 "sp %08lx ecr %lu\n",
4962diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4963index 568885a..f8008df 100644
4964--- a/arch/blackfin/include/asm/cache.h
4965+++ b/arch/blackfin/include/asm/cache.h
4966@@ -7,6 +7,7 @@
4967 #ifndef __ARCH_BLACKFIN_CACHE_H
4968 #define __ARCH_BLACKFIN_CACHE_H
4969
4970+#include <linux/const.h>
4971 #include <linux/linkage.h> /* for asmlinkage */
4972
4973 /*
4974@@ -14,7 +15,7 @@
4975 * Blackfin loads 32 bytes for cache
4976 */
4977 #define L1_CACHE_SHIFT 5
4978-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4979+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4980 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4981
4982 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4983diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4984index aea2718..3639a60 100644
4985--- a/arch/cris/include/arch-v10/arch/cache.h
4986+++ b/arch/cris/include/arch-v10/arch/cache.h
4987@@ -1,8 +1,9 @@
4988 #ifndef _ASM_ARCH_CACHE_H
4989 #define _ASM_ARCH_CACHE_H
4990
4991+#include <linux/const.h>
4992 /* Etrax 100LX have 32-byte cache-lines. */
4993-#define L1_CACHE_BYTES 32
4994 #define L1_CACHE_SHIFT 5
4995+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4996
4997 #endif /* _ASM_ARCH_CACHE_H */
4998diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4999index 7caf25d..ee65ac5 100644
5000--- a/arch/cris/include/arch-v32/arch/cache.h
5001+++ b/arch/cris/include/arch-v32/arch/cache.h
5002@@ -1,11 +1,12 @@
5003 #ifndef _ASM_CRIS_ARCH_CACHE_H
5004 #define _ASM_CRIS_ARCH_CACHE_H
5005
5006+#include <linux/const.h>
5007 #include <arch/hwregs/dma.h>
5008
5009 /* A cache-line is 32 bytes. */
5010-#define L1_CACHE_BYTES 32
5011 #define L1_CACHE_SHIFT 5
5012+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5013
5014 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5015
5016diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5017index 102190a..5334cea 100644
5018--- a/arch/frv/include/asm/atomic.h
5019+++ b/arch/frv/include/asm/atomic.h
5020@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5021 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5022 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5023
5024+#define atomic64_read_unchecked(v) atomic64_read(v)
5025+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5026+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5027+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5028+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5029+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5030+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5031+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5032+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5033+
5034 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5035 {
5036 int c, old;
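
frv (like ia64 further down) gains the *_unchecked API as plain aliases: this architecture carries no PAX_REFCOUNT instrumentation, so checked and unchecked operations are the same thing, and in the grsecurity tree atomic64_unchecked_t collapses to atomic64_t in that configuration. A compile-time sketch of the aliasing shape, with simplified stand-in types:

typedef struct { long counter; } atomic64_t;
typedef atomic64_t atomic64_unchecked_t;	/* identical when unchecked */

static inline void atomic64_inc(atomic64_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
}

/* no instrumentation here, so the unchecked form is a straight alias */
#define atomic64_inc_unchecked(v)	atomic64_inc(v)

int main(void)
{
	atomic64_unchecked_t n = { 0 };

	atomic64_inc_unchecked(&n);
	return (int)n.counter - 1;	/* exits 0 */
}
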
5037diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5038index 2797163..c2a401df9 100644
5039--- a/arch/frv/include/asm/cache.h
5040+++ b/arch/frv/include/asm/cache.h
5041@@ -12,10 +12,11 @@
5042 #ifndef __ASM_CACHE_H
5043 #define __ASM_CACHE_H
5044
5045+#include <linux/const.h>
5046
5047 /* bytes per L1 cache line */
5048 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5049-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5050+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5051
5052 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5053 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5054diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5055index 43901f2..0d8b865 100644
5056--- a/arch/frv/include/asm/kmap_types.h
5057+++ b/arch/frv/include/asm/kmap_types.h
5058@@ -2,6 +2,6 @@
5059 #ifndef _ASM_KMAP_TYPES_H
5060 #define _ASM_KMAP_TYPES_H
5061
5062-#define KM_TYPE_NR 17
5063+#define KM_TYPE_NR 18
5064
5065 #endif
5066diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5067index 836f147..4cf23f5 100644
5068--- a/arch/frv/mm/elf-fdpic.c
5069+++ b/arch/frv/mm/elf-fdpic.c
5070@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5071 {
5072 struct vm_area_struct *vma;
5073 struct vm_unmapped_area_info info;
5074+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5075
5076 if (len > TASK_SIZE)
5077 return -ENOMEM;
5078@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5079 if (addr) {
5080 addr = PAGE_ALIGN(addr);
5081 vma = find_vma(current->mm, addr);
5082- if (TASK_SIZE - len >= addr &&
5083- (!vma || addr + len <= vma->vm_start))
5084+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5085 goto success;
5086 }
5087
5088@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5089 info.high_limit = (current->mm->start_stack - 0x00200000);
5090 info.align_mask = 0;
5091 info.align_offset = 0;
5092+ info.threadstack_offset = offset;
5093 addr = vm_unmapped_area(&info);
5094 if (!(addr & ~PAGE_MASK))
5095 goto success;
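
arch_get_unmapped_area() now rejects a candidate address unless check_heap_stack_gap() approves it, replacing the bare addr + len <= vma->vm_start overlap test; the grsecurity helper also enforces a gap below stack mappings and folds in the per-thread offset added above. The following is only an illustrative sketch of that intent, not the kernel's implementation (the struct and tunable names here are invented):

#include <stdbool.h>

struct vma_stub {			/* invented stand-in for vm_area_struct */
	unsigned long vm_start;
	unsigned long vm_flags;
};

#define VM_GROWSDOWN_STUB	0x100UL
static unsigned long heap_stack_gap = 64UL * 1024;	/* assumed tunable */

static bool check_gap(const struct vma_stub *vma, unsigned long addr,
		      unsigned long len)
{
	if (!vma)				/* nothing above the range */
		return true;
	if (addr + len > vma->vm_start)		/* plain overlap */
		return false;
	if (vma->vm_flags & VM_GROWSDOWN_STUB)	/* keep clear of stacks */
		return addr + len + heap_stack_gap <= vma->vm_start;
	return true;
}

int main(void)
{
	struct vma_stub stack = { .vm_start = 0x7f0000000000UL,
				  .vm_flags = VM_GROWSDOWN_STUB };

	/* an allocation ending flush against the stack is refused */
	return check_gap(&stack, stack.vm_start - 4096, 4096) ? 1 : 0;
}
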
5096diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5097index 69952c18..4fa2908 100644
5098--- a/arch/hexagon/include/asm/cache.h
5099+++ b/arch/hexagon/include/asm/cache.h
5100@@ -21,9 +21,11 @@
5101 #ifndef __ASM_CACHE_H
5102 #define __ASM_CACHE_H
5103
5104+#include <linux/const.h>
5105+
5106 /* Bytes per L1 cache line */
5107-#define L1_CACHE_SHIFT (5)
5108-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5109+#define L1_CACHE_SHIFT 5
5110+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5111
5112 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5113
5114diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5115index 074e52b..76afdac 100644
5116--- a/arch/ia64/Kconfig
5117+++ b/arch/ia64/Kconfig
5118@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5119 config KEXEC
5120 bool "kexec system call"
5121 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5122+ depends on !GRKERNSEC_KMEM
5123 help
5124 kexec is a system call that implements the ability to shutdown your
5125 current kernel, and to start another kernel. It is like a reboot
5126diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5127index 970d0bd..e750b9b 100644
5128--- a/arch/ia64/Makefile
5129+++ b/arch/ia64/Makefile
5130@@ -98,5 +98,6 @@ endef
5131 archprepare: make_nr_irqs_h FORCE
5132 PHONY += make_nr_irqs_h FORCE
5133
5134+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5135 make_nr_irqs_h: FORCE
5136 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5137diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5138index 0bf0350..2ad1957 100644
5139--- a/arch/ia64/include/asm/atomic.h
5140+++ b/arch/ia64/include/asm/atomic.h
5141@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5142 #define atomic64_inc(v) atomic64_add(1, (v))
5143 #define atomic64_dec(v) atomic64_sub(1, (v))
5144
5145+#define atomic64_read_unchecked(v) atomic64_read(v)
5146+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5147+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5148+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5149+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5150+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5151+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5152+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5153+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5154+
5155 #endif /* _ASM_IA64_ATOMIC_H */
5156diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5157index f6769eb..1cdb590 100644
5158--- a/arch/ia64/include/asm/barrier.h
5159+++ b/arch/ia64/include/asm/barrier.h
5160@@ -66,7 +66,7 @@
5161 do { \
5162 compiletime_assert_atomic_type(*p); \
5163 barrier(); \
5164- ACCESS_ONCE(*p) = (v); \
5165+ ACCESS_ONCE_RW(*p) = (v); \
5166 } while (0)
5167
5168 #define smp_load_acquire(p) \
5169diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5170index 988254a..e1ee885 100644
5171--- a/arch/ia64/include/asm/cache.h
5172+++ b/arch/ia64/include/asm/cache.h
5173@@ -1,6 +1,7 @@
5174 #ifndef _ASM_IA64_CACHE_H
5175 #define _ASM_IA64_CACHE_H
5176
5177+#include <linux/const.h>
5178
5179 /*
5180 * Copyright (C) 1998-2000 Hewlett-Packard Co
5181@@ -9,7 +10,7 @@
5182
5183 /* Bytes per L1 (data) cache line. */
5184 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5185-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5186+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5187
5188 #ifdef CONFIG_SMP
5189 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5190diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5191index 5a83c5c..4d7f553 100644
5192--- a/arch/ia64/include/asm/elf.h
5193+++ b/arch/ia64/include/asm/elf.h
5194@@ -42,6 +42,13 @@
5195 */
5196 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5197
5198+#ifdef CONFIG_PAX_ASLR
5199+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5200+
5201+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5202+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5203+#endif
5204+
5205 #define PT_IA_64_UNWIND 0x70000001
5206
5207 /* IA-64 relocations: */
5208diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5209index 5767cdf..7462574 100644
5210--- a/arch/ia64/include/asm/pgalloc.h
5211+++ b/arch/ia64/include/asm/pgalloc.h
5212@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5213 pgd_val(*pgd_entry) = __pa(pud);
5214 }
5215
5216+static inline void
5217+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5218+{
5219+ pgd_populate(mm, pgd_entry, pud);
5220+}
5221+
5222 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5223 {
5224 return quicklist_alloc(0, GFP_KERNEL, NULL);
5225@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5226 pud_val(*pud_entry) = __pa(pmd);
5227 }
5228
5229+static inline void
5230+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5231+{
5232+ pud_populate(mm, pud_entry, pmd);
5233+}
5234+
5235 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5236 {
5237 return quicklist_alloc(0, GFP_KERNEL, NULL);
5238diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5239index 7b6f880..ac8e008 100644
5240--- a/arch/ia64/include/asm/pgtable.h
5241+++ b/arch/ia64/include/asm/pgtable.h
5242@@ -12,7 +12,7 @@
5243 * David Mosberger-Tang <davidm@hpl.hp.com>
5244 */
5245
5246-
5247+#include <linux/const.h>
5248 #include <asm/mman.h>
5249 #include <asm/page.h>
5250 #include <asm/processor.h>
5251@@ -139,6 +139,17 @@
5252 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5253 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5254 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5255+
5256+#ifdef CONFIG_PAX_PAGEEXEC
5257+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5258+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5259+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5260+#else
5261+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5262+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5263+# define PAGE_COPY_NOEXEC PAGE_COPY
5264+#endif
5265+
5266 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5267 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5268 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5269diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5270index 45698cd..e8e2dbc 100644
5271--- a/arch/ia64/include/asm/spinlock.h
5272+++ b/arch/ia64/include/asm/spinlock.h
5273@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5274 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5275
5276 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5277- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5278+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5279 }
5280
5281 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5282diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5283index 4f3fb6cc..254055e 100644
5284--- a/arch/ia64/include/asm/uaccess.h
5285+++ b/arch/ia64/include/asm/uaccess.h
5286@@ -70,6 +70,7 @@
5287 && ((segment).seg == KERNEL_DS.seg \
5288 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5289 })
5290+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5291 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5292
5293 /*
5294@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5295 static inline unsigned long
5296 __copy_to_user (void __user *to, const void *from, unsigned long count)
5297 {
5298+ if (count > INT_MAX)
5299+ return count;
5300+
5301+ if (!__builtin_constant_p(count))
5302+ check_object_size(from, count, true);
5303+
5304 return __copy_user(to, (__force void __user *) from, count);
5305 }
5306
5307 static inline unsigned long
5308 __copy_from_user (void *to, const void __user *from, unsigned long count)
5309 {
5310+ if (count > INT_MAX)
5311+ return count;
5312+
5313+ if (!__builtin_constant_p(count))
5314+ check_object_size(to, count, false);
5315+
5316 return __copy_user((__force void __user *) to, from, count);
5317 }
5318
5319@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5320 ({ \
5321 void __user *__cu_to = (to); \
5322 const void *__cu_from = (from); \
5323- long __cu_len = (n); \
5324+ unsigned long __cu_len = (n); \
5325 \
5326- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5327+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5328+ if (!__builtin_constant_p(n)) \
5329+ check_object_size(__cu_from, __cu_len, true); \
5330 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5331+ } \
5332 __cu_len; \
5333 })
5334
5335@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5336 ({ \
5337 void *__cu_to = (to); \
5338 const void __user *__cu_from = (from); \
5339- long __cu_len = (n); \
5340+ unsigned long __cu_len = (n); \
5341 \
5342 __chk_user_ptr(__cu_from); \
5343- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5344+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5345+ if (!__builtin_constant_p(n)) \
5346+ check_object_size(__cu_to, __cu_len, false); \
5347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5348+ } \
5349 __cu_len; \
5350 })
5351
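
The uaccess changes do two things: refuse any copy longer than INT_MAX, which catches signed lengths that went negative and became huge unsigned counts, and call check_object_size() on non-constant sizes so the hardened-usercopy logic can vet the kernel object. A minimal userspace sketch of the first check (memcpy stands in for __copy_user):

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* returns the number of bytes NOT copied, like the kernel helpers */
static unsigned long copy_checked(void *to, const void *from, unsigned long n)
{
	if (n > INT_MAX)	/* a negative length cast to unsigned lands here */
		return n;
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char src[8] = "data", dst[8];
	int bad_len = -1;	/* e.g. an error code misused as a length */

	unsigned long left = copy_checked(dst, src, (unsigned long)bad_len);
	printf("refused: %lu bytes left uncopied\n", left);
	return 0;
}
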
5352diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5353index 29754aa..06d2838 100644
5354--- a/arch/ia64/kernel/module.c
5355+++ b/arch/ia64/kernel/module.c
5356@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5357 }
5358
5359 static inline int
5360+in_init_rx (const struct module *mod, uint64_t addr)
5361+{
5362+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5363+}
5364+
5365+static inline int
5366+in_init_rw (const struct module *mod, uint64_t addr)
5367+{
5368+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5369+}
5370+
5371+static inline int
5372 in_init (const struct module *mod, uint64_t addr)
5373 {
5374- return addr - (uint64_t) mod->module_init < mod->init_size;
5375+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5376+}
5377+
5378+static inline int
5379+in_core_rx (const struct module *mod, uint64_t addr)
5380+{
5381+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5382+}
5383+
5384+static inline int
5385+in_core_rw (const struct module *mod, uint64_t addr)
5386+{
5387+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5388 }
5389
5390 static inline int
5391 in_core (const struct module *mod, uint64_t addr)
5392 {
5393- return addr - (uint64_t) mod->module_core < mod->core_size;
5394+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5395 }
5396
5397 static inline int
5398@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5399 break;
5400
5401 case RV_BDREL:
5402- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5403+ if (in_init_rx(mod, val))
5404+ val -= (uint64_t) mod->module_init_rx;
5405+ else if (in_init_rw(mod, val))
5406+ val -= (uint64_t) mod->module_init_rw;
5407+ else if (in_core_rx(mod, val))
5408+ val -= (uint64_t) mod->module_core_rx;
5409+ else if (in_core_rw(mod, val))
5410+ val -= (uint64_t) mod->module_core_rw;
5411 break;
5412
5413 case RV_LTV:
5414@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5415 * addresses have been selected...
5416 */
5417 uint64_t gp;
5418- if (mod->core_size > MAX_LTOFF)
5419+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5420 /*
5421 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5422 * at the end of the module.
5423 */
5424- gp = mod->core_size - MAX_LTOFF / 2;
5425+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5426 else
5427- gp = mod->core_size / 2;
5428- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5429+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5430+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5431 mod->arch.gp = gp;
5432 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5433 }
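
The new in_init_rx()/in_core_rw() helpers all use the addr - base < size idiom: the subtraction wraps to a huge unsigned value whenever addr sits below base, so a single comparison tests both bounds. A standalone demonstration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	/* equivalent to (base <= addr && addr < base + size), in one compare */
	return addr - base < size;
}

int main(void)
{
	printf("%d\n", in_range(0x1000, 0x1000, 0x100));	/* 1: first byte */
	printf("%d\n", in_range(0x10ff, 0x1000, 0x100));	/* 1: last byte  */
	printf("%d\n", in_range(0x0fff, 0x1000, 0x100));	/* 0: below base */
	return 0;
}
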
5434diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5435index c39c3cd..3c77738 100644
5436--- a/arch/ia64/kernel/palinfo.c
5437+++ b/arch/ia64/kernel/palinfo.c
5438@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5439 return NOTIFY_OK;
5440 }
5441
5442-static struct notifier_block __refdata palinfo_cpu_notifier =
5443+static struct notifier_block palinfo_cpu_notifier =
5444 {
5445 .notifier_call = palinfo_cpu_callback,
5446 .priority = 0,
5447diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5448index 41e33f8..65180b2a 100644
5449--- a/arch/ia64/kernel/sys_ia64.c
5450+++ b/arch/ia64/kernel/sys_ia64.c
5451@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5452 unsigned long align_mask = 0;
5453 struct mm_struct *mm = current->mm;
5454 struct vm_unmapped_area_info info;
5455+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5456
5457 if (len > RGN_MAP_LIMIT)
5458 return -ENOMEM;
5459@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5460 if (REGION_NUMBER(addr) == RGN_HPAGE)
5461 addr = 0;
5462 #endif
5463+
5464+#ifdef CONFIG_PAX_RANDMMAP
5465+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5466+ addr = mm->free_area_cache;
5467+ else
5468+#endif
5469+
5470 if (!addr)
5471 addr = TASK_UNMAPPED_BASE;
5472
5473@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5474 info.high_limit = TASK_SIZE;
5475 info.align_mask = align_mask;
5476 info.align_offset = 0;
5477+ info.threadstack_offset = offset;
5478 return vm_unmapped_area(&info);
5479 }
5480
5481diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5482index 84f8a52..7c76178 100644
5483--- a/arch/ia64/kernel/vmlinux.lds.S
5484+++ b/arch/ia64/kernel/vmlinux.lds.S
5485@@ -192,7 +192,7 @@ SECTIONS {
5486 /* Per-cpu data: */
5487 . = ALIGN(PERCPU_PAGE_SIZE);
5488 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5489- __phys_per_cpu_start = __per_cpu_load;
5490+ __phys_per_cpu_start = per_cpu_load;
5491 /*
5492 * ensure percpu data fits
5493 * into percpu page size
5494diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5495index ba5ba7a..36e9d3a 100644
5496--- a/arch/ia64/mm/fault.c
5497+++ b/arch/ia64/mm/fault.c
5498@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5499 return pte_present(pte);
5500 }
5501
5502+#ifdef CONFIG_PAX_PAGEEXEC
5503+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5504+{
5505+ unsigned long i;
5506+
5507+ printk(KERN_ERR "PAX: bytes at PC: ");
5508+ for (i = 0; i < 8; i++) {
5509+ unsigned int c;
5510+ if (get_user(c, (unsigned int *)pc+i))
5511+ printk(KERN_CONT "???????? ");
5512+ else
5513+ printk(KERN_CONT "%08x ", c);
5514+ }
5515+ printk("\n");
5516+}
5517+#endif
5518+
5519 # define VM_READ_BIT 0
5520 # define VM_WRITE_BIT 1
5521 # define VM_EXEC_BIT 2
5522@@ -151,8 +168,21 @@ retry:
5523 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5524 goto bad_area;
5525
5526- if ((vma->vm_flags & mask) != mask)
5527+ if ((vma->vm_flags & mask) != mask) {
5528+
5529+#ifdef CONFIG_PAX_PAGEEXEC
5530+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5531+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5532+ goto bad_area;
5533+
5534+ up_read(&mm->mmap_sem);
5535+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5536+ do_group_exit(SIGKILL);
5537+ }
5538+#endif
5539+
5540 goto bad_area;
5541+ }
5542
5543 /*
5544 * If for any reason at all we couldn't handle the fault, make
5545diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5546index 52b7604b..455cb85 100644
5547--- a/arch/ia64/mm/hugetlbpage.c
5548+++ b/arch/ia64/mm/hugetlbpage.c
5549@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5550 unsigned long pgoff, unsigned long flags)
5551 {
5552 struct vm_unmapped_area_info info;
5553+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5554
5555 if (len > RGN_MAP_LIMIT)
5556 return -ENOMEM;
5557@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5558 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5559 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5560 info.align_offset = 0;
5561+ info.threadstack_offset = offset;
5562 return vm_unmapped_area(&info);
5563 }
5564
5565diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5566index 6b33457..88b5124 100644
5567--- a/arch/ia64/mm/init.c
5568+++ b/arch/ia64/mm/init.c
5569@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5570 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5571 vma->vm_end = vma->vm_start + PAGE_SIZE;
5572 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5573+
5574+#ifdef CONFIG_PAX_PAGEEXEC
5575+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5576+ vma->vm_flags &= ~VM_EXEC;
5577+
5578+#ifdef CONFIG_PAX_MPROTECT
5579+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5580+ vma->vm_flags &= ~VM_MAYEXEC;
5581+#endif
5582+
5583+ }
5584+#endif
5585+
5586 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5587 down_write(&current->mm->mmap_sem);
5588 if (insert_vm_struct(current->mm, vma)) {
5589@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5590 gate_vma.vm_start = FIXADDR_USER_START;
5591 gate_vma.vm_end = FIXADDR_USER_END;
5592 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5593- gate_vma.vm_page_prot = __P101;
5594+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5595
5596 return 0;
5597 }
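
Replacing the hardcoded __P101 (read+exec) with vm_get_page_prot(gate_vma.vm_flags) means the gate page's protection is derived from its flags, so any policy that strips VM_EXEC is reflected automatically. A toy sketch of the lookup idea (the table below is illustrative, not the kernel's protection_map):

#include <stdio.h>

#define VM_READ		0x1UL
#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL

/* illustrative permission table indexed by the low three flag bits */
static const char *protection_map[8] = {
	"---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx",
};

static const char *vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC)];
}

int main(void)
{
	printf("%s\n", vm_get_page_prot(VM_READ | VM_EXEC));	/* r-x */
	printf("%s\n", vm_get_page_prot(VM_READ));	/* r-- once VM_EXEC is gone */
	return 0;
}
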
5598diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5599index 40b3ee98..8c2c112 100644
5600--- a/arch/m32r/include/asm/cache.h
5601+++ b/arch/m32r/include/asm/cache.h
5602@@ -1,8 +1,10 @@
5603 #ifndef _ASM_M32R_CACHE_H
5604 #define _ASM_M32R_CACHE_H
5605
5606+#include <linux/const.h>
5607+
5608 /* L1 cache line size */
5609 #define L1_CACHE_SHIFT 4
5610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5612
5613 #endif /* _ASM_M32R_CACHE_H */
5614diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5615index 82abd15..d95ae5d 100644
5616--- a/arch/m32r/lib/usercopy.c
5617+++ b/arch/m32r/lib/usercopy.c
5618@@ -14,6 +14,9 @@
5619 unsigned long
5620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5621 {
5622+ if ((long)n < 0)
5623+ return n;
5624+
5625 prefetch(from);
5626 if (access_ok(VERIFY_WRITE, to, n))
5627 __copy_user(to,from,n);
5628@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5629 unsigned long
5630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5631 {
5632+ if ((long)n < 0)
5633+ return n;
5634+
5635 prefetchw(to);
5636 if (access_ok(VERIFY_READ, from, n))
5637 __copy_user_zeroing(to,from,n);
5638diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5639index 0395c51..5f26031 100644
5640--- a/arch/m68k/include/asm/cache.h
5641+++ b/arch/m68k/include/asm/cache.h
5642@@ -4,9 +4,11 @@
5643 #ifndef __ARCH_M68K_CACHE_H
5644 #define __ARCH_M68K_CACHE_H
5645
5646+#include <linux/const.h>
5647+
5648 /* bytes per L1 cache line */
5649 #define L1_CACHE_SHIFT 4
5650-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5652
5653 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5654
5655diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5656index d703d8e..a8e2d70 100644
5657--- a/arch/metag/include/asm/barrier.h
5658+++ b/arch/metag/include/asm/barrier.h
5659@@ -90,7 +90,7 @@ static inline void fence(void)
5660 do { \
5661 compiletime_assert_atomic_type(*p); \
5662 smp_mb(); \
5663- ACCESS_ONCE(*p) = (v); \
5664+ ACCESS_ONCE_RW(*p) = (v); \
5665 } while (0)
5666
5667 #define smp_load_acquire(p) \
5668diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5669index 7ca80ac..794ba72 100644
5670--- a/arch/metag/mm/hugetlbpage.c
5671+++ b/arch/metag/mm/hugetlbpage.c
5672@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5673 info.high_limit = TASK_SIZE;
5674 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5675 info.align_offset = 0;
5676+ info.threadstack_offset = 0;
5677 return vm_unmapped_area(&info);
5678 }
5679
5680diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5681index 4efe96a..60e8699 100644
5682--- a/arch/microblaze/include/asm/cache.h
5683+++ b/arch/microblaze/include/asm/cache.h
5684@@ -13,11 +13,12 @@
5685 #ifndef _ASM_MICROBLAZE_CACHE_H
5686 #define _ASM_MICROBLAZE_CACHE_H
5687
5688+#include <linux/const.h>
5689 #include <asm/registers.h>
5690
5691 #define L1_CACHE_SHIFT 5
5692 /* word-granular cache in microblaze */
5693-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5694+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5695
5696 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5697
5698diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5699index 1a313c4..f27b613 100644
5700--- a/arch/mips/Kconfig
5701+++ b/arch/mips/Kconfig
5702@@ -2504,6 +2504,7 @@ source "kernel/Kconfig.preempt"
5703
5704 config KEXEC
5705 bool "Kexec system call"
5706+ depends on !GRKERNSEC_KMEM
5707 help
5708 kexec is a system call that implements the ability to shutdown your
5709 current kernel, and to start another kernel. It is like a reboot
5710diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5711index d8960d4..77dbd31 100644
5712--- a/arch/mips/cavium-octeon/dma-octeon.c
5713+++ b/arch/mips/cavium-octeon/dma-octeon.c
5714@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5715 if (dma_release_from_coherent(dev, order, vaddr))
5716 return;
5717
5718- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5719+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5720 }
5721
5722 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5723diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5724index 26d4363..3c9a82e 100644
5725--- a/arch/mips/include/asm/atomic.h
5726+++ b/arch/mips/include/asm/atomic.h
5727@@ -22,15 +22,39 @@
5728 #include <asm/cmpxchg.h>
5729 #include <asm/war.h>
5730
5731+#ifdef CONFIG_GENERIC_ATOMIC64
5732+#include <asm-generic/atomic64.h>
5733+#endif
5734+
5735 #define ATOMIC_INIT(i) { (i) }
5736
5737+#ifdef CONFIG_64BIT
5738+#define _ASM_EXTABLE(from, to) \
5739+" .section __ex_table,\"a\"\n" \
5740+" .dword " #from ", " #to"\n" \
5741+" .previous\n"
5742+#else
5743+#define _ASM_EXTABLE(from, to) \
5744+" .section __ex_table,\"a\"\n" \
5745+" .word " #from ", " #to"\n" \
5746+" .previous\n"
5747+#endif
5748+
5749 /*
5750 * atomic_read - read atomic variable
5751 * @v: pointer of type atomic_t
5752 *
5753 * Atomically reads the value of @v.
5754 */
5755-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5756+static inline int atomic_read(const atomic_t *v)
5757+{
5758+ return ACCESS_ONCE(v->counter);
5759+}
5760+
5761+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5762+{
5763+ return ACCESS_ONCE(v->counter);
5764+}
5765
5766 /*
5767 * atomic_set - set atomic variable
5768@@ -39,47 +63,77 @@
5769 *
5770 * Atomically sets the value of @v to @i.
5771 */
5772-#define atomic_set(v, i) ((v)->counter = (i))
5773+static inline void atomic_set(atomic_t *v, int i)
5774+{
5775+ v->counter = i;
5776+}
5777
5778-#define ATOMIC_OP(op, c_op, asm_op) \
5779-static __inline__ void atomic_##op(int i, atomic_t * v) \
5780+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5781+{
5782+ v->counter = i;
5783+}
5784+
5785+#ifdef CONFIG_PAX_REFCOUNT
5786+#define __OVERFLOW_POST \
5787+ " b 4f \n" \
5788+ " .set noreorder \n" \
5789+ "3: b 5f \n" \
5790+ " move %0, %1 \n" \
5791+ " .set reorder \n"
5792+#define __OVERFLOW_EXTABLE \
5793+ "3:\n" \
5794+ _ASM_EXTABLE(2b, 3b)
5795+#else
5796+#define __OVERFLOW_POST
5797+#define __OVERFLOW_EXTABLE
5798+#endif
5799+
5800+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5801+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5802 { \
5803 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5804 int temp; \
5805 \
5806 __asm__ __volatile__( \
5807- " .set arch=r4000 \n" \
5808- "1: ll %0, %1 # atomic_" #op " \n" \
5809- " " #asm_op " %0, %2 \n" \
5810+ " .set mips3 \n" \
5811+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5812+ "2: " #asm_op " %0, %2 \n" \
5813 " sc %0, %1 \n" \
5814 " beqzl %0, 1b \n" \
5815+ extable \
5816 " .set mips0 \n" \
5817 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5818 : "Ir" (i)); \
5819 } else if (kernel_uses_llsc) { \
5820 int temp; \
5821 \
5822- do { \
5823- __asm__ __volatile__( \
5824- " .set "MIPS_ISA_LEVEL" \n" \
5825- " ll %0, %1 # atomic_" #op "\n" \
5826- " " #asm_op " %0, %2 \n" \
5827- " sc %0, %1 \n" \
5828- " .set mips0 \n" \
5829- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5830- : "Ir" (i)); \
5831- } while (unlikely(!temp)); \
5832+ __asm__ __volatile__( \
5833+ " .set "MIPS_ISA_LEVEL" \n" \
5834+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5835+ "2: " #asm_op " %0, %2 \n" \
5836+ " sc %0, %1 \n" \
5837+ " beqz %0, 1b \n" \
5838+ extable \
5839+ " .set mips0 \n" \
5840+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5841+ : "Ir" (i)); \
5842 } else { \
5843 unsigned long flags; \
5844 \
5845 raw_local_irq_save(flags); \
5846- v->counter c_op i; \
5847+ __asm__ __volatile__( \
5848+ "2: " #asm_op " %0, %1 \n" \
5849+ extable \
5850+ : "+r" (v->counter) : "Ir" (i)); \
5851 raw_local_irq_restore(flags); \
5852 } \
5853 }
5854
5855-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5856-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5857+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5858+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5859+
5860+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5861+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5862 { \
5863 int result; \
5864 \
5865@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5866 int temp; \
5867 \
5868 __asm__ __volatile__( \
5869- " .set arch=r4000 \n" \
5870- "1: ll %1, %2 # atomic_" #op "_return \n" \
5871- " " #asm_op " %0, %1, %3 \n" \
5872+ " .set mips3 \n" \
5873+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5874+ "2: " #asm_op " %0, %1, %3 \n" \
5875 " sc %0, %2 \n" \
5876 " beqzl %0, 1b \n" \
5877- " " #asm_op " %0, %1, %3 \n" \
5878+ post_op \
5879+ extable \
5880+ "4: " #asm_op " %0, %1, %3 \n" \
5881+ "5: \n" \
5882 " .set mips0 \n" \
5883 : "=&r" (result), "=&r" (temp), \
5884 "+" GCC_OFF_SMALL_ASM() (v->counter) \
5885@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5886 } else if (kernel_uses_llsc) { \
5887 int temp; \
5888 \
5889- do { \
5890- __asm__ __volatile__( \
5891- " .set "MIPS_ISA_LEVEL" \n" \
5892- " ll %1, %2 # atomic_" #op "_return \n" \
5893- " " #asm_op " %0, %1, %3 \n" \
5894- " sc %0, %2 \n" \
5895- " .set mips0 \n" \
5896- : "=&r" (result), "=&r" (temp), \
5897- "+" GCC_OFF_SMALL_ASM() (v->counter) \
5898- : "Ir" (i)); \
5899- } while (unlikely(!result)); \
5900+ __asm__ __volatile__( \
5901+ " .set "MIPS_ISA_LEVEL" \n" \
5902+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5903+ "2: " #asm_op " %0, %1, %3 \n" \
5904+ " sc %0, %2 \n" \
5905+ post_op \
5906+ extable \
5907+ "4: " #asm_op " %0, %1, %3 \n" \
5908+ "5: \n" \
5909+ " .set mips0 \n" \
5910+ : "=&r" (result), "=&r" (temp), \
5911+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
5912+ : "Ir" (i)); \
5913 \
5914 result = temp; result c_op i; \
5915 } else { \
5916 unsigned long flags; \
5917 \
5918 raw_local_irq_save(flags); \
5919- result = v->counter; \
5920- result c_op i; \
5921- v->counter = result; \
5922+ __asm__ __volatile__( \
5923+ " lw %0, %1 \n" \
5924+ "2: " #asm_op " %0, %1, %2 \n" \
5925+ " sw %0, %1 \n" \
5926+ "3: \n" \
5927+ extable \
5928+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5929+ : "Ir" (i)); \
5930 raw_local_irq_restore(flags); \
5931 } \
5932 \
5933@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5934 return result; \
5935 }
5936
5937-#define ATOMIC_OPS(op, c_op, asm_op) \
5938- ATOMIC_OP(op, c_op, asm_op) \
5939- ATOMIC_OP_RETURN(op, c_op, asm_op)
5940+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5941+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5942
5943-ATOMIC_OPS(add, +=, addu)
5944-ATOMIC_OPS(sub, -=, subu)
5945+#define ATOMIC_OPS(op, asm_op) \
5946+ ATOMIC_OP(op, asm_op) \
5947+ ATOMIC_OP_RETURN(op, asm_op)
5948+
5949+ATOMIC_OPS(add, add)
5950+ATOMIC_OPS(sub, sub)
5951
5952 #undef ATOMIC_OPS
5953 #undef ATOMIC_OP_RETURN
5954+#undef __ATOMIC_OP_RETURN
5955 #undef ATOMIC_OP
5956+#undef __ATOMIC_OP
5957
5958 /*
5959 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5960@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5961 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5962 * The function returns the old value of @v minus @i.
5963 */
5964-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5965+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5966 {
5967 int result;
5968
5969@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5970 int temp;
5971
5972 __asm__ __volatile__(
5973- " .set arch=r4000 \n"
5974+ " .set "MIPS_ISA_LEVEL" \n"
5975 "1: ll %1, %2 # atomic_sub_if_positive\n"
5976 " subu %0, %1, %3 \n"
5977 " bltz %0, 1f \n"
5978@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5979 return result;
5980 }
5981
5982-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5983-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5984+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5985+{
5986+ return cmpxchg(&v->counter, old, new);
5987+}
5988+
5989+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5990+ int new)
5991+{
5992+ return cmpxchg(&(v->counter), old, new);
5993+}
5994+
5995+static inline int atomic_xchg(atomic_t *v, int new)
5996+{
5997+ return xchg(&v->counter, new);
5998+}
5999+
6000+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6001+{
6002+ return xchg(&(v->counter), new);
6003+}
6004
6005 /**
6006 * __atomic_add_unless - add unless the number is a given value
6007@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6008
6009 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6010 #define atomic_inc_return(v) atomic_add_return(1, (v))
6011+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6012+{
6013+ return atomic_add_return_unchecked(1, v);
6014+}
6015
6016 /*
6017 * atomic_sub_and_test - subtract value from variable and test result
6018@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6019 * other cases.
6020 */
6021 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6022+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6023+{
6024+ return atomic_add_return_unchecked(1, v) == 0;
6025+}
6026
6027 /*
6028 * atomic_dec_and_test - decrement by 1 and test
6029@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6030 * Atomically increments @v by 1.
6031 */
6032 #define atomic_inc(v) atomic_add(1, (v))
6033+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6034+{
6035+ atomic_add_unchecked(1, v);
6036+}
6037
6038 /*
6039 * atomic_dec - decrement and test
6040@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6041 * Atomically decrements @v by 1.
6042 */
6043 #define atomic_dec(v) atomic_sub(1, (v))
6044+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6045+{
6046+ atomic_sub_unchecked(1, v);
6047+}
6048
6049 /*
6050 * atomic_add_negative - add and test if negative
6051@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6052 * @v: pointer of type atomic64_t
6053 *
6054 */
6055-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6056+static inline long atomic64_read(const atomic64_t *v)
6057+{
6058+ return ACCESS_ONCE(v->counter);
6059+}
6060+
6061+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6062+{
6063+ return ACCESS_ONCE(v->counter);
6064+}
6065
6066 /*
6067 * atomic64_set - set atomic variable
6068 * @v: pointer of type atomic64_t
6069 * @i: required value
6070 */
6071-#define atomic64_set(v, i) ((v)->counter = (i))
6072+static inline void atomic64_set(atomic64_t *v, long i)
6073+{
6074+ v->counter = i;
6075+}
6076
6077-#define ATOMIC64_OP(op, c_op, asm_op) \
6078-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6079+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6080+{
6081+ v->counter = i;
6082+}
6083+
6084+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6085+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6086 { \
6087 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6088 long temp; \
6089 \
6090 __asm__ __volatile__( \
6091- " .set arch=r4000 \n" \
6092- "1: lld %0, %1 # atomic64_" #op " \n" \
6093- " " #asm_op " %0, %2 \n" \
6094+ " .set "MIPS_ISA_LEVEL" \n" \
6095+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6096+ "2: " #asm_op " %0, %2 \n" \
6097 " scd %0, %1 \n" \
6098 " beqzl %0, 1b \n" \
6099+ extable \
6100 " .set mips0 \n" \
6101 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6102 : "Ir" (i)); \
6103 } else if (kernel_uses_llsc) { \
6104 long temp; \
6105 \
6106- do { \
6107- __asm__ __volatile__( \
6108- " .set "MIPS_ISA_LEVEL" \n" \
6109- " lld %0, %1 # atomic64_" #op "\n" \
6110- " " #asm_op " %0, %2 \n" \
6111- " scd %0, %1 \n" \
6112- " .set mips0 \n" \
6113- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6114- : "Ir" (i)); \
6115- } while (unlikely(!temp)); \
6116+ __asm__ __volatile__( \
6117+ " .set "MIPS_ISA_LEVEL" \n" \
6118+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6119+ "2: " #asm_op " %0, %2 \n" \
6120+ " scd %0, %1 \n" \
6121+ " beqz %0, 1b \n" \
6122+ extable \
6123+ " .set mips0 \n" \
6124+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6125+ : "Ir" (i)); \
6126 } else { \
6127 unsigned long flags; \
6128 \
6129 raw_local_irq_save(flags); \
6130- v->counter c_op i; \
6131+ __asm__ __volatile__( \
6132+ "2: " #asm_op " %0, %1 \n" \
6133+ extable \
6134+ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6135 raw_local_irq_restore(flags); \
6136 } \
6137 }
6138
6139-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6140-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6141+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6142+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6143+
6144+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6145+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6146 { \
6147 long result; \
6148 \
6149@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6150 long temp; \
6151 \
6152 __asm__ __volatile__( \
6153- " .set arch=r4000 \n" \
6154+ " .set mips3 \n" \
6155 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6156- " " #asm_op " %0, %1, %3 \n" \
6157+ "2: " #asm_op " %0, %1, %3 \n" \
6158 " scd %0, %2 \n" \
6159 " beqzl %0, 1b \n" \
6160- " " #asm_op " %0, %1, %3 \n" \
6161+ post_op \
6162+ extable \
6163+ "4: " #asm_op " %0, %1, %3 \n" \
6164+ "5: \n" \
6165 " .set mips0 \n" \
6166 : "=&r" (result), "=&r" (temp), \
6167 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6168@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6169 } else if (kernel_uses_llsc) { \
6170 long temp; \
6171 \
6172- do { \
6173- __asm__ __volatile__( \
6174- " .set "MIPS_ISA_LEVEL" \n" \
6175- " lld %1, %2 # atomic64_" #op "_return\n" \
6176- " " #asm_op " %0, %1, %3 \n" \
6177- " scd %0, %2 \n" \
6178- " .set mips0 \n" \
6179- : "=&r" (result), "=&r" (temp), \
6180- "=" GCC_OFF_SMALL_ASM() (v->counter) \
6181- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6182- : "memory"); \
6183- } while (unlikely(!result)); \
6184+ __asm__ __volatile__( \
6185+ " .set "MIPS_ISA_LEVEL" \n" \
6186+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6187+ "2: " #asm_op " %0, %1, %3 \n" \
6188+ " scd %0, %2 \n" \
6189+ " beqz %0, 1b \n" \
6190+ post_op \
6191+ extable \
6192+ "4: " #asm_op " %0, %1, %3 \n" \
6193+ "5: \n" \
6194+ " .set mips0 \n" \
6195+ : "=&r" (result), "=&r" (temp), \
6196+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
6197+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6198+ : "memory"); \
6199 \
6200 result = temp; result c_op i; \
6201 } else { \
6202 unsigned long flags; \
6203 \
6204 raw_local_irq_save(flags); \
6205- result = v->counter; \
6206- result c_op i; \
6207- v->counter = result; \
6208+ __asm__ __volatile__( \
6209+ " ld %0, %1 \n" \
6210+ "2: " #asm_op " %0, %1, %2 \n" \
6211+ " sd %0, %1 \n" \
6212+ "3: \n" \
6213+ extable \
6214+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6215+ : "Ir" (i)); \
6216 raw_local_irq_restore(flags); \
6217 } \
6218 \
6219@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6220 return result; \
6221 }
6222
6223-#define ATOMIC64_OPS(op, c_op, asm_op) \
6224- ATOMIC64_OP(op, c_op, asm_op) \
6225- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6226+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6227+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6228
6229-ATOMIC64_OPS(add, +=, daddu)
6230-ATOMIC64_OPS(sub, -=, dsubu)
6231+#define ATOMIC64_OPS(op, asm_op) \
6232+ ATOMIC64_OP(op, asm_op) \
6233+ ATOMIC64_OP_RETURN(op, asm_op)
6234+
6235+ATOMIC64_OPS(add, dadd)
6236+ATOMIC64_OPS(sub, dsub)
6237
6238 #undef ATOMIC64_OPS
6239 #undef ATOMIC64_OP_RETURN
6240+#undef __ATOMIC64_OP_RETURN
6241 #undef ATOMIC64_OP
6242+#undef __ATOMIC64_OP
6243+#undef __OVERFLOW_EXTABLE
6244+#undef __OVERFLOW_POST
6245
6246 /*
6247 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6248@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6249 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6250 * The function returns the old value of @v minus @i.
6251 */
6252-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6253+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6254 {
6255 long result;
6256
6257@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6258 long temp;
6259
6260 __asm__ __volatile__(
6261- " .set arch=r4000 \n"
6262+ " .set "MIPS_ISA_LEVEL" \n"
6263 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6264 " dsubu %0, %1, %3 \n"
6265 " bltz %0, 1f \n"
6266@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6267 return result;
6268 }
6269
6270-#define atomic64_cmpxchg(v, o, n) \
6271- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6272-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6273+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6274+{
6275+ return cmpxchg(&v->counter, old, new);
6276+}
6277+
6278+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6279+ long new)
6280+{
6281+ return cmpxchg(&(v->counter), old, new);
6282+}
6283+
6284+static inline long atomic64_xchg(atomic64_t *v, long new)
6285+{
6286+ return xchg(&v->counter, new);
6287+}
6288+
6289+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6290+{
6291+ return xchg(&(v->counter), new);
6292+}
6293
6294 /**
6295 * atomic64_add_unless - add unless the number is a given value
6296@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6297
6298 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6299 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6300+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6301
6302 /*
6303 * atomic64_sub_and_test - subtract value from variable and test result
6304@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6305 * other cases.
6306 */
6307 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6308+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6309
6310 /*
6311 * atomic64_dec_and_test - decrement by 1 and test
6312@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6313 * Atomically increments @v by 1.
6314 */
6315 #define atomic64_inc(v) atomic64_add(1, (v))
6316+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6317
6318 /*
6319 * atomic64_dec - decrement and test
6320@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6321 * Atomically decrements @v by 1.
6322 */
6323 #define atomic64_dec(v) atomic64_sub(1, (v))
6324+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6325
6326 /*
6327 * atomic64_add_negative - add and test if negative
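
The reworked MIPS atomics split every operation into a checked flavour built on the trapping instructions (add, dadd, sub, dsub), which raise an Integer Overflow exception on signed wraparound, and an _unchecked flavour built on their non-trapping ##u forms; __OVERFLOW_POST plus the _ASM_EXTABLE entries give the trap handler a fixup target that restores the old value. A portable sketch of the checked behaviour, using a compiler builtin instead of a trap-and-fixup:

#include <limits.h>
#include <stdio.h>

/* returns 0 on success, -1 when the increment would wrap (where the
 * MIPS 'add' would have trapped and PaX would log a refcount event) */
static int checked_inc(int *counter)
{
	int next;

	if (__builtin_add_overflow(*counter, 1, &next))
		return -1;	/* leave *counter pinned at its old value */
	*counter = next;
	return 0;
}

int main(void)
{
	int c = INT_MAX;

	printf("%d\n", checked_inc(&c));	/* -1: overflow refused */
	printf("%d\n", c == INT_MAX);		/* 1: value unchanged  */
	return 0;
}
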
6328diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6329index 2b8bbbc..4556df6 100644
6330--- a/arch/mips/include/asm/barrier.h
6331+++ b/arch/mips/include/asm/barrier.h
6332@@ -133,7 +133,7 @@
6333 do { \
6334 compiletime_assert_atomic_type(*p); \
6335 smp_mb(); \
6336- ACCESS_ONCE(*p) = (v); \
6337+ ACCESS_ONCE_RW(*p) = (v); \
6338 } while (0)
6339
6340 #define smp_load_acquire(p) \
6341diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6342index b4db69f..8f3b093 100644
6343--- a/arch/mips/include/asm/cache.h
6344+++ b/arch/mips/include/asm/cache.h
6345@@ -9,10 +9,11 @@
6346 #ifndef _ASM_CACHE_H
6347 #define _ASM_CACHE_H
6348
6349+#include <linux/const.h>
6350 #include <kmalloc.h>
6351
6352 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6353-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6354+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6355
6356 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6357 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6358diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6359index 694925a..990fa62 100644
6360--- a/arch/mips/include/asm/elf.h
6361+++ b/arch/mips/include/asm/elf.h
6362@@ -410,15 +410,18 @@ extern const char *__elf_platform;
6363 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6364 #endif
6365
6366+#ifdef CONFIG_PAX_ASLR
6367+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6368+
6369+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6370+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6371+#endif
6372+
6373 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6374 struct linux_binprm;
6375 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6376 int uses_interp);
6377
6378-struct mm_struct;
6379-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6380-#define arch_randomize_brk arch_randomize_brk
6381-
6382 struct arch_elf_state {
6383 int fp_abi;
6384 int interp_fp_abi;
6385diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6386index c1f6afa..38cc6e9 100644
6387--- a/arch/mips/include/asm/exec.h
6388+++ b/arch/mips/include/asm/exec.h
6389@@ -12,6 +12,6 @@
6390 #ifndef _ASM_EXEC_H
6391 #define _ASM_EXEC_H
6392
6393-extern unsigned long arch_align_stack(unsigned long sp);
6394+#define arch_align_stack(x) ((x) & ~0xfUL)
6395
6396 #endif /* _ASM_EXEC_H */
6397diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6398index 9e8ef59..1139d6b 100644
6399--- a/arch/mips/include/asm/hw_irq.h
6400+++ b/arch/mips/include/asm/hw_irq.h
6401@@ -10,7 +10,7 @@
6402
6403 #include <linux/atomic.h>
6404
6405-extern atomic_t irq_err_count;
6406+extern atomic_unchecked_t irq_err_count;
6407
6408 /*
6409 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6410diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6411index 8feaed6..1bd8a64 100644
6412--- a/arch/mips/include/asm/local.h
6413+++ b/arch/mips/include/asm/local.h
6414@@ -13,15 +13,25 @@ typedef struct
6415 atomic_long_t a;
6416 } local_t;
6417
6418+typedef struct {
6419+ atomic_long_unchecked_t a;
6420+} local_unchecked_t;
6421+
6422 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6423
6424 #define local_read(l) atomic_long_read(&(l)->a)
6425+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6426 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6427+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6428
6429 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6430+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6431 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6432+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6433 #define local_inc(l) atomic_long_inc(&(l)->a)
6434+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6435 #define local_dec(l) atomic_long_dec(&(l)->a)
6436+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6437
6438 /*
6439 * Same as above, but return the result value
6440@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6441 return result;
6442 }
6443
6444+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6445+{
6446+ unsigned long result;
6447+
6448+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6449+ unsigned long temp;
6450+
6451+ __asm__ __volatile__(
6452+ " .set mips3 \n"
6453+ "1:" __LL "%1, %2 # local_add_return \n"
6454+ " addu %0, %1, %3 \n"
6455+ __SC "%0, %2 \n"
6456+ " beqzl %0, 1b \n"
6457+ " addu %0, %1, %3 \n"
6458+ " .set mips0 \n"
6459+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6460+ : "Ir" (i), "m" (l->a.counter)
6461+ : "memory");
6462+ } else if (kernel_uses_llsc) {
6463+ unsigned long temp;
6464+
6465+ __asm__ __volatile__(
6466+ " .set mips3 \n"
6467+ "1:" __LL "%1, %2 # local_add_return \n"
6468+ " addu %0, %1, %3 \n"
6469+ __SC "%0, %2 \n"
6470+ " beqz %0, 1b \n"
6471+ " addu %0, %1, %3 \n"
6472+ " .set mips0 \n"
6473+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6474+ : "Ir" (i), "m" (l->a.counter)
6475+ : "memory");
6476+ } else {
6477+ unsigned long flags;
6478+
6479+ local_irq_save(flags);
6480+ result = l->a.counter;
6481+ result += i;
6482+ l->a.counter = result;
6483+ local_irq_restore(flags);
6484+ }
6485+
6486+ return result;
6487+}
6488+
6489 static __inline__ long local_sub_return(long i, local_t * l)
6490 {
6491 unsigned long result;
6492@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6493
6494 #define local_cmpxchg(l, o, n) \
6495 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6496+#define local_cmpxchg_unchecked(l, o, n) \
6497+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6498 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6499
6500 /**
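
local_add_return_unchecked() above is a hand-written LL/SC retry loop: load-linked, add, store-conditional, branch back if the store was interfered with. In portable C11 terms the same read-modify-write shape is a weak compare-exchange loop; a minimal sketch:

#include <stdatomic.h>
#include <stdio.h>

static long local_add_return(long i, atomic_long *l)
{
	long old = atomic_load_explicit(l, memory_order_relaxed);

	/* retry until no other writer slipped in between load and store --
	 * the role played by "beqz %0, 1b" after the sc above */
	while (!atomic_compare_exchange_weak_explicit(l, &old, old + i,
						      memory_order_seq_cst,
						      memory_order_relaxed))
		;
	return old + i;
}

int main(void)
{
	atomic_long v = 40;

	printf("%ld\n", local_add_return(2, &v));	/* 42 */
	return 0;
}
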
6501diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6502index 154b70a..426ae3d 100644
6503--- a/arch/mips/include/asm/page.h
6504+++ b/arch/mips/include/asm/page.h
6505@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6506 #ifdef CONFIG_CPU_MIPS32
6507 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6508 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6509- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6510+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6511 #else
6512 typedef struct { unsigned long long pte; } pte_t;
6513 #define pte_val(x) ((x).pte)
6514diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6515index b336037..5b874cc 100644
6516--- a/arch/mips/include/asm/pgalloc.h
6517+++ b/arch/mips/include/asm/pgalloc.h
6518@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6519 {
6520 set_pud(pud, __pud((unsigned long)pmd));
6521 }
6522+
6523+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6524+{
6525+ pud_populate(mm, pud, pmd);
6526+}
6527 #endif
6528
6529 /*
6530diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6531index f8f809f..b5f3fa4 100644
6532--- a/arch/mips/include/asm/pgtable.h
6533+++ b/arch/mips/include/asm/pgtable.h
6534@@ -20,6 +20,9 @@
6535 #include <asm/io.h>
6536 #include <asm/pgtable-bits.h>
6537
6538+#define ktla_ktva(addr) (addr)
6539+#define ktva_ktla(addr) (addr)
6540+
6541 struct mm_struct;
6542 struct vm_area_struct;
6543
6544diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6545index 55ed660..3dc9422 100644
6546--- a/arch/mips/include/asm/thread_info.h
6547+++ b/arch/mips/include/asm/thread_info.h
6548@@ -102,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
6549 #define TIF_SECCOMP 4 /* secure computing */
6550 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6551 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6552+/* li takes a 32bit immediate */
6553+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6554+
6555 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6556 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6557 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6558@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
6559 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6560 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6561 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6562+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6563
6564 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6565 _TIF_SYSCALL_AUDIT | \
6566- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6567+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6568+ _TIF_GRSEC_SETXID)
6569
6570 /* work to do in syscall_trace_leave() */
6571 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6572- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6573+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6574
6575 /* work to do on interrupt/exception return */
6576 #define _TIF_WORK_MASK \
6577@@ -152,7 +157,7 @@ static inline struct thread_info *current_thread_info(void)
6578 /* work to do on any return to u-space */
6579 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6580 _TIF_WORK_SYSCALL_EXIT | \
6581- _TIF_SYSCALL_TRACEPOINT)
6582+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6583
6584 /*
6585 * We stash processor id into a COP0 register to retrieve it fast
6586diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6587index bf8b324..cec5705 100644
6588--- a/arch/mips/include/asm/uaccess.h
6589+++ b/arch/mips/include/asm/uaccess.h
6590@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6591 __ok == 0; \
6592 })
6593
6594+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6595 #define access_ok(type, addr, size) \
6596 likely(__access_ok((addr), (size), __access_mask))
6597
6598diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6599index 1188e00..41cf144 100644
6600--- a/arch/mips/kernel/binfmt_elfn32.c
6601+++ b/arch/mips/kernel/binfmt_elfn32.c
6602@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6603 #undef ELF_ET_DYN_BASE
6604 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6605
6606+#ifdef CONFIG_PAX_ASLR
6607+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6608+
6609+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6610+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6611+#endif
6612+
6613 #include <asm/processor.h>
6614 #include <linux/module.h>
6615 #include <linux/elfcore.h>
6616diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6617index 9287678..f870e47 100644
6618--- a/arch/mips/kernel/binfmt_elfo32.c
6619+++ b/arch/mips/kernel/binfmt_elfo32.c
6620@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6621 #undef ELF_ET_DYN_BASE
6622 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6623
6624+#ifdef CONFIG_PAX_ASLR
6625+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6626+
6627+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6628+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6629+#endif
6630+
6631 #include <asm/processor.h>
6632
6633 #include <linux/module.h>
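
Both MIPS ELF loaders (n32 above, o32 here) gain the same PAX_ASLR constants: a fixed randomized ET_DYN base plus the number of random bits applied to the mmap and stack bases, 27 address bits for 32-bit tasks and 36 for 64-bit ones, expressed as bit counts above PAGE_SHIFT. As a rough sketch of how such a constant is consumed (PaX derives the per-exec delta approximately by masking random bits to PAX_DELTA_MMAP_LEN and shifting up by PAGE_SHIFT; the helper below is illustrative user-space code, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    /* Illustrative only: turn "N random bits above PAGE_SHIFT" into a
     * page-aligned delta, the way PAX_DELTA_MMAP_LEN is consumed. */
    static unsigned long random_delta(unsigned int bits)
    {
        unsigned long mask = (1UL << bits) - 1;
        return ((unsigned long)random() & mask) << PAGE_SHIFT;
    }

    int main(void)
    {
        unsigned int bits = 27 - PAGE_SHIFT;   /* 32-bit MIPS task */
        printf("max delta: %#lx\n", ((1UL << bits) - 1) << PAGE_SHIFT);
        printf("sample:    %#lx\n", random_delta(bits));
        return 0;
    }

With PAGE_SHIFT = 12, 27 - PAGE_SHIFT leaves 15 random bits, so the 32-bit delta can displace mappings by up to roughly 128 MB.
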
6634diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6635index a74ec3a..4f06f18 100644
6636--- a/arch/mips/kernel/i8259.c
6637+++ b/arch/mips/kernel/i8259.c
6638@@ -202,7 +202,7 @@ spurious_8259A_irq:
6639 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6640 spurious_irq_mask |= irqmask;
6641 }
6642- atomic_inc(&irq_err_count);
6643+ atomic_inc_unchecked(&irq_err_count);
6644 /*
6645 * Theoretically we do not have to handle this IRQ,
6646 * but in Linux this does not cause problems and is
6647diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6648index 44a1f79..2bd6aa3 100644
6649--- a/arch/mips/kernel/irq-gt641xx.c
6650+++ b/arch/mips/kernel/irq-gt641xx.c
6651@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6652 }
6653 }
6654
6655- atomic_inc(&irq_err_count);
6656+ atomic_inc_unchecked(&irq_err_count);
6657 }
6658
6659 void __init gt641xx_irq_init(void)
6660diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6661index d2bfbc2..a8eacd2 100644
6662--- a/arch/mips/kernel/irq.c
6663+++ b/arch/mips/kernel/irq.c
6664@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6665 printk("unexpected IRQ # %d\n", irq);
6666 }
6667
6668-atomic_t irq_err_count;
6669+atomic_unchecked_t irq_err_count;
6670
6671 int arch_show_interrupts(struct seq_file *p, int prec)
6672 {
6673- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6674+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6675 return 0;
6676 }
6677
6678 asmlinkage void spurious_interrupt(void)
6679 {
6680- atomic_inc(&irq_err_count);
6681+ atomic_inc_unchecked(&irq_err_count);
6682 }
6683
6684 void __init init_IRQ(void)
6685@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6686 #endif
6687 }
6688
6689+
6690 #ifdef DEBUG_STACKOVERFLOW
6691+extern void gr_handle_kernel_exploit(void);
6692+
6693 static inline void check_stack_overflow(void)
6694 {
6695 unsigned long sp;
6696@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6697 printk("do_IRQ: stack overflow: %ld\n",
6698 sp - sizeof(struct thread_info));
6699 dump_stack();
6700+ gr_handle_kernel_exploit();
6701 }
6702 }
6703 #else
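
irq_err_count is a pure statistics counter, so wrapping it can never corrupt an object lifetime; converting it (and its users in i8259.c, irq-gt641xx.c and the vr41xx/sni handlers further down) to atomic_unchecked_t exempts it from PAX_REFCOUNT's overflow trap while real reference counts stay protected. A single-threaded user-space sketch of the checked/unchecked split; the structs and the abort() stand in for the kernel's instrumented atomics and are not the real implementation:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_t;            /* instrumented */
    typedef struct { int counter; } atomic_unchecked_t;  /* exempt       */

    /* Single-threaded stand-in; abort() plays the overflow trap. */
    static void atomic_inc(atomic_t *v)
    {
        int r;
        if (__builtin_sadd_overflow(v->counter, 1, &r))
            abort();                              /* refcount trap */
        v->counter = r;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* defined wraparound via unsigned arithmetic */
        v->counter = (int)((unsigned int)v->counter + 1u);
    }

    int main(void)
    {
        atomic_t ref = { 0 };
        atomic_unchecked_t errs = { INT_MAX };

        atomic_inc(&ref);                 /* normal path */
        atomic_inc_unchecked(&errs);      /* wraps harmlessly, no trap */
        printf("ref = %d, errs = %d\n", ref.counter, errs.counter);
        return 0;
    }
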
6704diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6705index 0614717..002fa43 100644
6706--- a/arch/mips/kernel/pm-cps.c
6707+++ b/arch/mips/kernel/pm-cps.c
6708@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6709 nc_core_ready_count = nc_addr;
6710
6711 /* Ensure ready_count is zero-initialised before the assembly runs */
6712- ACCESS_ONCE(*nc_core_ready_count) = 0;
6713+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6714 coupled_barrier(&per_cpu(pm_barrier, core), online);
6715
6716 /* Run the generated entry code */
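
ACCESS_ONCE_RW exists because PaX constifies ACCESS_ONCE: the read form casts through a const volatile lvalue, so an accidental write through it fails to compile, and intentional writes like the ready_count initialisation here must use the RW form. Reconstructed shape of the two macros (from memory of the PaX tree, so treat the exact spelling as approximate):

    #include <stdio.h>

    /* Reconstructed, approximate shape of the PaX macros: */
    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static int flag;

    int main(void)
    {
        int seen = ACCESS_ONCE(flag);     /* reads compile either way   */
        ACCESS_ONCE_RW(flag) = seen + 1;  /* writes need the RW variant */
        /* ACCESS_ONCE(flag) = 0;         rejected: assigning to const  */
        printf("flag = %d\n", ACCESS_ONCE(flag));
        return 0;
    }
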
6717diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6718index bf85cc1..b365c61 100644
6719--- a/arch/mips/kernel/process.c
6720+++ b/arch/mips/kernel/process.c
6721@@ -535,18 +535,6 @@ out:
6722 return pc;
6723 }
6724
6725-/*
6726- * Don't forget that the stack pointer must be aligned on a 8 bytes
6727- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6728- */
6729-unsigned long arch_align_stack(unsigned long sp)
6730-{
6731- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6732- sp -= get_random_int() & ~PAGE_MASK;
6733-
6734- return sp & ALMASK;
6735-}
6736-
6737 static void arch_dump_stack(void *info)
6738 {
6739 struct pt_regs *regs;
6740diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6741index 5104528..950bbdc 100644
6742--- a/arch/mips/kernel/ptrace.c
6743+++ b/arch/mips/kernel/ptrace.c
6744@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6745 return ret;
6746 }
6747
6748+#ifdef CONFIG_GRKERNSEC_SETXID
6749+extern void gr_delayed_cred_worker(void);
6750+#endif
6751+
6752 /*
6753 * Notification of system call entry/exit
6754 * - triggered by current->work.syscall_trace
6755@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6756 tracehook_report_syscall_entry(regs))
6757 ret = -1;
6758
6759+#ifdef CONFIG_GRKERNSEC_SETXID
6760+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6761+ gr_delayed_cred_worker();
6762+#endif
6763+
6764 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6765 trace_sys_enter(regs, regs->regs[2]);
6766
6767diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6768index 07fc524..b9d7f28 100644
6769--- a/arch/mips/kernel/reset.c
6770+++ b/arch/mips/kernel/reset.c
6771@@ -13,6 +13,7 @@
6772 #include <linux/reboot.h>
6773
6774 #include <asm/reboot.h>
6775+#include <asm/bug.h>
6776
6777 /*
6778 * Urgs ... Too many MIPS machines to handle this in a generic way.
6779@@ -29,16 +30,19 @@ void machine_restart(char *command)
6780 {
6781 if (_machine_restart)
6782 _machine_restart(command);
6783+ BUG();
6784 }
6785
6786 void machine_halt(void)
6787 {
6788 if (_machine_halt)
6789 _machine_halt();
6790+ BUG();
6791 }
6792
6793 void machine_power_off(void)
6794 {
6795 if (pm_power_off)
6796 pm_power_off();
6797+ BUG();
6798 }
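
machine_restart(), machine_halt() and machine_power_off() are expected never to return; if the board callback is absent or falls through, control would otherwise run off the end of the function into whatever follows. The added BUG() converts that into a deterministic, attributable oops. A sketch of the pattern, with __builtin_trap() standing in for the kernel's BUG():

    #include <stdio.h>

    static void broken_board_halt(void) { puts("halt handler returned!"); }

    static void machine_halt_sketch(void (*board_halt)(void))
    {
        if (board_halt)
            board_halt();
        /* Reaching here means the handler returned (or was NULL):
         * crash deterministically, as the added BUG() does. */
        __builtin_trap();
    }

    int main(void)
    {
        machine_halt_sketch(broken_board_halt);   /* traps after the puts */
        return 0;
    }
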
6799diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6800index 2242bdd..b284048 100644
6801--- a/arch/mips/kernel/sync-r4k.c
6802+++ b/arch/mips/kernel/sync-r4k.c
6803@@ -18,8 +18,8 @@
6804 #include <asm/mipsregs.h>
6805
6806 static atomic_t count_start_flag = ATOMIC_INIT(0);
6807-static atomic_t count_count_start = ATOMIC_INIT(0);
6808-static atomic_t count_count_stop = ATOMIC_INIT(0);
6809+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6810+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6811 static atomic_t count_reference = ATOMIC_INIT(0);
6812
6813 #define COUNTON 100
6814@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6815
6816 for (i = 0; i < NR_LOOPS; i++) {
6817 /* slaves loop on '!= 2' */
6818- while (atomic_read(&count_count_start) != 1)
6819+ while (atomic_read_unchecked(&count_count_start) != 1)
6820 mb();
6821- atomic_set(&count_count_stop, 0);
6822+ atomic_set_unchecked(&count_count_stop, 0);
6823 smp_wmb();
6824
6825 /* this lets the slaves write their count register */
6826- atomic_inc(&count_count_start);
6827+ atomic_inc_unchecked(&count_count_start);
6828
6829 /*
6830 * Everyone initialises count in the last loop:
6831@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6832 /*
6833 * Wait for all slaves to leave the synchronization point:
6834 */
6835- while (atomic_read(&count_count_stop) != 1)
6836+ while (atomic_read_unchecked(&count_count_stop) != 1)
6837 mb();
6838- atomic_set(&count_count_start, 0);
6839+ atomic_set_unchecked(&count_count_start, 0);
6840 smp_wmb();
6841- atomic_inc(&count_count_stop);
6842+ atomic_inc_unchecked(&count_count_stop);
6843 }
6844 /* Arrange for an interrupt in a short while */
6845 write_c0_compare(read_c0_count() + COUNTON);
6846@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6847 initcount = atomic_read(&count_reference);
6848
6849 for (i = 0; i < NR_LOOPS; i++) {
6850- atomic_inc(&count_count_start);
6851- while (atomic_read(&count_count_start) != 2)
6852+ atomic_inc_unchecked(&count_count_start);
6853+ while (atomic_read_unchecked(&count_count_start) != 2)
6854 mb();
6855
6856 /*
6857@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6858 if (i == NR_LOOPS-1)
6859 write_c0_count(initcount);
6860
6861- atomic_inc(&count_count_stop);
6862- while (atomic_read(&count_count_stop) != 2)
6863+ atomic_inc_unchecked(&count_count_stop);
6864+ while (atomic_read_unchecked(&count_count_stop) != 2)
6865 mb();
6866 }
6867 /* Arrange for an interrupt in a short while */
6868diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6869index 33984c0..666a96d 100644
6870--- a/arch/mips/kernel/traps.c
6871+++ b/arch/mips/kernel/traps.c
6872@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6873 siginfo_t info;
6874
6875 prev_state = exception_enter();
6876- die_if_kernel("Integer overflow", regs);
6877+ if (unlikely(!user_mode(regs))) {
6878+
6879+#ifdef CONFIG_PAX_REFCOUNT
6880+ if (fixup_exception(regs)) {
6881+ pax_report_refcount_overflow(regs);
6882+ exception_exit(prev_state);
6883+ return;
6884+ }
6885+#endif
6886+
6887+ die("Integer overflow", regs);
6888+ }
6889
6890 info.si_code = FPE_INTOVF;
6891 info.si_signo = SIGFPE;
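
Under PAX_REFCOUNT the instrumented MIPS atomics raise an integer-overflow exception when a counter would wrap, so do_ov() grows a kernel-mode path: if the faulting instruction has an exception-table fixup, the overflow is reported and execution resumes at the fixup with the counter left saturated; only fixup-less kernel overflows still die(). A user-space analogy of that trap, report, resume flow, using setjmp/longjmp in place of the exception table (illustrative only):

    #include <limits.h>
    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf fixup;   /* plays the exception-table entry's role */

    static void inc_trapping(int *v)
    {
        int r;
        if (__builtin_sadd_overflow(*v, 1, &r)) {
            fprintf(stderr, "PAX: refcount overflow\n");
            longjmp(fixup, 1);          /* "resume at the fixup" */
        }
        *v = r;
    }

    int main(void)
    {
        int ref = INT_MAX;
        if (setjmp(fixup) == 0)
            inc_trapping(&ref);
        printf("recovered; ref pinned at %d\n", ref);
        return 0;
    }
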
6892diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6893index f5e7dda..47198ec 100644
6894--- a/arch/mips/kvm/mips.c
6895+++ b/arch/mips/kvm/mips.c
6896@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6897 return r;
6898 }
6899
6900-int kvm_arch_init(void *opaque)
6901+int kvm_arch_init(const void *opaque)
6902 {
6903 if (kvm_mips_callbacks) {
6904 kvm_err("kvm: module already exists\n");
6905diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6906index 7ff8637..6004edb 100644
6907--- a/arch/mips/mm/fault.c
6908+++ b/arch/mips/mm/fault.c
6909@@ -31,6 +31,23 @@
6910
6911 int show_unhandled_signals = 1;
6912
6913+#ifdef CONFIG_PAX_PAGEEXEC
6914+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6915+{
6916+ unsigned long i;
6917+
6918+ printk(KERN_ERR "PAX: bytes at PC: ");
6919+ for (i = 0; i < 5; i++) {
6920+ unsigned int c;
6921+ if (get_user(c, (unsigned int *)pc+i))
6922+ printk(KERN_CONT "???????? ");
6923+ else
6924+ printk(KERN_CONT "%08x ", c);
6925+ }
6926+ printk("\n");
6927+}
6928+#endif
6929+
6930 /*
6931 * This routine handles page faults. It determines the address,
6932 * and the problem, and then passes it off to one of the appropriate
6933@@ -206,6 +223,14 @@ bad_area:
6934 bad_area_nosemaphore:
6935 /* User mode accesses just cause a SIGSEGV */
6936 if (user_mode(regs)) {
6937+
6938+#ifdef CONFIG_PAX_PAGEEXEC
6939+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6940+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6941+ do_group_exit(SIGKILL);
6942+ }
6943+#endif
6944+
6945 tsk->thread.cp0_badvaddr = address;
6946 tsk->thread.error_code = write;
6947 if (show_unhandled_signals &&
6948diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6949index f1baadd..5472dca 100644
6950--- a/arch/mips/mm/mmap.c
6951+++ b/arch/mips/mm/mmap.c
6952@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6953 struct vm_area_struct *vma;
6954 unsigned long addr = addr0;
6955 int do_color_align;
6956+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6957 struct vm_unmapped_area_info info;
6958
6959 if (unlikely(len > TASK_SIZE))
6960@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 do_color_align = 1;
6962
6963 /* requesting a specific address */
6964+
6965+#ifdef CONFIG_PAX_RANDMMAP
6966+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6967+#endif
6968+
6969 if (addr) {
6970 if (do_color_align)
6971 addr = COLOUR_ALIGN(addr, pgoff);
6972@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6973 addr = PAGE_ALIGN(addr);
6974
6975 vma = find_vma(mm, addr);
6976- if (TASK_SIZE - len >= addr &&
6977- (!vma || addr + len <= vma->vm_start))
6978+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6979 return addr;
6980 }
6981
6982 info.length = len;
6983 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6984 info.align_offset = pgoff << PAGE_SHIFT;
6985+ info.threadstack_offset = offset;
6986
6987 if (dir == DOWN) {
6988 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6989@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6990 {
6991 unsigned long random_factor = 0UL;
6992
6993+#ifdef CONFIG_PAX_RANDMMAP
6994+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6995+#endif
6996+
6997 if (current->flags & PF_RANDOMIZE) {
6998 random_factor = get_random_int();
6999 random_factor = random_factor << PAGE_SHIFT;
7000@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7001
7002 if (mmap_is_legacy()) {
7003 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7004+
7005+#ifdef CONFIG_PAX_RANDMMAP
7006+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7007+ mm->mmap_base += mm->delta_mmap;
7008+#endif
7009+
7010 mm->get_unmapped_area = arch_get_unmapped_area;
7011 } else {
7012 mm->mmap_base = mmap_base(random_factor);
7013+
7014+#ifdef CONFIG_PAX_RANDMMAP
7015+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7016+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7017+#endif
7018+
7019 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7020 }
7021 }
7022
7023-static inline unsigned long brk_rnd(void)
7024-{
7025- unsigned long rnd = get_random_int();
7026-
7027- rnd = rnd << PAGE_SHIFT;
7028- /* 8MB for 32bit, 256MB for 64bit */
7029- if (TASK_IS_32BIT_ADDR)
7030- rnd = rnd & 0x7ffffful;
7031- else
7032- rnd = rnd & 0xffffffful;
7033-
7034- return rnd;
7035-}
7036-
7037-unsigned long arch_randomize_brk(struct mm_struct *mm)
7038-{
7039- unsigned long base = mm->brk;
7040- unsigned long ret;
7041-
7042- ret = PAGE_ALIGN(base + brk_rnd());
7043-
7044- if (ret < mm->brk)
7045- return mm->brk;
7046-
7047- return ret;
7048-}
7049-
7050 int __virt_addr_valid(const volatile void *kaddr)
7051 {
7052 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
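
The mmap changes implement two PAX_RANDMMAP behaviours: a caller-supplied hint address is ignored while randomization is active (an attacker-influenced hint must not bypass it, and accepted addresses are further vetted by check_heap_stack_gap() with the random threadstack offset), and the mmap base is displaced by delta_mmap in the bottom-up layout or by delta_mmap + delta_stack in the top-down one. The deleted brk_rnd()/arch_randomize_brk() pair is superseded by PaX's own brk randomization. A compilable sketch of the base displacement; the field names mirror the patch, everything else is simplified:

    #include <stdio.h>

    struct mm_sketch {
        unsigned long mmap_base;
        unsigned long delta_mmap;    /* random, page-aligned */
        unsigned long delta_stack;   /* random, page-aligned */
        int randmmap;                /* MF_PAX_RANDMMAP set? */
    };

    static void pick_layout(struct mm_sketch *mm, int legacy,
                            unsigned long legacy_base, unsigned long top_base)
    {
        if (legacy) {                          /* bottom-up mmap */
            mm->mmap_base = legacy_base
                          + (mm->randmmap ? mm->delta_mmap : 0);
        } else {                               /* top-down mmap  */
            mm->mmap_base = top_base;
            if (mm->randmmap)
                mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
        }
    }

    int main(void)
    {
        struct mm_sketch mm = { 0, 0x200000, 0x100000, 1 };
        pick_layout(&mm, 0, 0x10000000UL, 0x7fff0000UL);
        printf("mmap_base = %#lx\n", mm.mmap_base);
        return 0;
    }
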
7053diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7054index a2358b4..7cead4f 100644
7055--- a/arch/mips/sgi-ip27/ip27-nmi.c
7056+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7057@@ -187,9 +187,9 @@ void
7058 cont_nmi_dump(void)
7059 {
7060 #ifndef REAL_NMI_SIGNAL
7061- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7062+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7063
7064- atomic_inc(&nmied_cpus);
7065+ atomic_inc_unchecked(&nmied_cpus);
7066 #endif
7067 /*
7068 * Only allow 1 cpu to proceed
7069@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7070 udelay(10000);
7071 }
7072 #else
7073- while (atomic_read(&nmied_cpus) != num_online_cpus());
7074+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7075 #endif
7076
7077 /*
7078diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7079index a046b30..6799527 100644
7080--- a/arch/mips/sni/rm200.c
7081+++ b/arch/mips/sni/rm200.c
7082@@ -270,7 +270,7 @@ spurious_8259A_irq:
7083 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7084 spurious_irq_mask |= irqmask;
7085 }
7086- atomic_inc(&irq_err_count);
7087+ atomic_inc_unchecked(&irq_err_count);
7088 /*
7089 * Theoretically we do not have to handle this IRQ,
7090 * but in Linux this does not cause problems and is
7091diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7092index 41e873b..34d33a7 100644
7093--- a/arch/mips/vr41xx/common/icu.c
7094+++ b/arch/mips/vr41xx/common/icu.c
7095@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7096
7097 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7098
7099- atomic_inc(&irq_err_count);
7100+ atomic_inc_unchecked(&irq_err_count);
7101
7102 return -1;
7103 }
7104diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7105index ae0e4ee..e8f0692 100644
7106--- a/arch/mips/vr41xx/common/irq.c
7107+++ b/arch/mips/vr41xx/common/irq.c
7108@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7109 irq_cascade_t *cascade;
7110
7111 if (irq >= NR_IRQS) {
7112- atomic_inc(&irq_err_count);
7113+ atomic_inc_unchecked(&irq_err_count);
7114 return;
7115 }
7116
7117@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7118 ret = cascade->get_irq(irq);
7119 irq = ret;
7120 if (ret < 0)
7121- atomic_inc(&irq_err_count);
7122+ atomic_inc_unchecked(&irq_err_count);
7123 else
7124 irq_dispatch(irq);
7125 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7126diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7127index 967d144..db12197 100644
7128--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7129+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7130@@ -11,12 +11,14 @@
7131 #ifndef _ASM_PROC_CACHE_H
7132 #define _ASM_PROC_CACHE_H
7133
7134+#include <linux/const.h>
7135+
7136 /* L1 cache */
7137
7138 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7139 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7140-#define L1_CACHE_BYTES 16 /* bytes per entry */
7141 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7143 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7144
7145 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
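
This and the following cache headers (mn2ws0050, openrisc, parisc, powerpc) all get the same treatment: L1_CACHE_BYTES is derived from L1_CACHE_SHIFT instead of being spelled out separately, so the two values can never drift apart, and <linux/const.h> supplies _AC(), which appends the UL suffix only when compiled as C, because the suffix would not parse in assembly. The essence of _AC(), per include/uapi/linux/const.h:

    #include <stdio.h>

    /* Essence of include/uapi/linux/const.h: */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X             /* asm: type suffixes don't parse */
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)    /* C: paste the suffix -> 1UL     */
    #endif

    #define L1_CACHE_SHIFT 4
    #define L1_CACHE_BYTES (_AC(1, UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        printf("L1_CACHE_BYTES = %lu\n", L1_CACHE_BYTES);   /* 16 */
        return 0;
    }
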
7146diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7147index bcb5df2..84fabd2 100644
7148--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7149+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7150@@ -16,13 +16,15 @@
7151 #ifndef _ASM_PROC_CACHE_H
7152 #define _ASM_PROC_CACHE_H
7153
7154+#include <linux/const.h>
7155+
7156 /*
7157 * L1 cache
7158 */
7159 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7160 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7161-#define L1_CACHE_BYTES 32 /* bytes per entry */
7162 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7163+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7164 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7165
7166 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7167diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7168index 4ce7a01..449202a 100644
7169--- a/arch/openrisc/include/asm/cache.h
7170+++ b/arch/openrisc/include/asm/cache.h
7171@@ -19,11 +19,13 @@
7172 #ifndef __ASM_OPENRISC_CACHE_H
7173 #define __ASM_OPENRISC_CACHE_H
7174
7175+#include <linux/const.h>
7176+
7177 /* FIXME: How can we replace these with values from the CPU...
7178 * they shouldn't be hard-coded!
7179 */
7180
7181-#define L1_CACHE_BYTES 16
7182 #define L1_CACHE_SHIFT 4
7183+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7184
7185 #endif /* __ASM_OPENRISC_CACHE_H */
7186diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7187index 226f8ca..9d9b87d 100644
7188--- a/arch/parisc/include/asm/atomic.h
7189+++ b/arch/parisc/include/asm/atomic.h
7190@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7191 return dec;
7192 }
7193
7194+#define atomic64_read_unchecked(v) atomic64_read(v)
7195+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7196+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7197+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7198+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7199+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7200+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7201+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7202+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7203+
7204 #endif /* !CONFIG_64BIT */
7205
7206
7207diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7208index 47f11c7..3420df2 100644
7209--- a/arch/parisc/include/asm/cache.h
7210+++ b/arch/parisc/include/asm/cache.h
7211@@ -5,6 +5,7 @@
7212 #ifndef __ARCH_PARISC_CACHE_H
7213 #define __ARCH_PARISC_CACHE_H
7214
7215+#include <linux/const.h>
7216
7217 /*
7218 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7219@@ -15,13 +16,13 @@
7220 * just ruin performance.
7221 */
7222 #ifdef CONFIG_PA20
7223-#define L1_CACHE_BYTES 64
7224 #define L1_CACHE_SHIFT 6
7225 #else
7226-#define L1_CACHE_BYTES 32
7227 #define L1_CACHE_SHIFT 5
7228 #endif
7229
7230+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7231+
7232 #ifndef __ASSEMBLY__
7233
7234 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7235diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7236index 3391d06..c23a2cc 100644
7237--- a/arch/parisc/include/asm/elf.h
7238+++ b/arch/parisc/include/asm/elf.h
7239@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7240
7241 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7242
7243+#ifdef CONFIG_PAX_ASLR
7244+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7245+
7246+#define PAX_DELTA_MMAP_LEN 16
7247+#define PAX_DELTA_STACK_LEN 16
7248+#endif
7249+
7250 /* This yields a mask that user programs can use to figure out what
7251 instruction set this CPU supports. This could be done in user space,
7252 but it's not easy, and we've already done it here. */
7253diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7254index d174372..f27fe5c 100644
7255--- a/arch/parisc/include/asm/pgalloc.h
7256+++ b/arch/parisc/include/asm/pgalloc.h
7257@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7258 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7259 }
7260
7261+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7262+{
7263+ pgd_populate(mm, pgd, pmd);
7264+}
7265+
7266 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7267 {
7268 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7269@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7270 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7271 #define pmd_free(mm, x) do { } while (0)
7272 #define pgd_populate(mm, pmd, pte) BUG()
7273+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7274
7275 #endif
7276
7277diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7278index 15207b9..3209e65 100644
7279--- a/arch/parisc/include/asm/pgtable.h
7280+++ b/arch/parisc/include/asm/pgtable.h
7281@@ -215,6 +215,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7282 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7283 #define PAGE_COPY PAGE_EXECREAD
7284 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7288+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7289+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7290+#else
7291+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7292+# define PAGE_COPY_NOEXEC PAGE_COPY
7293+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7294+#endif
7295+
7296 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7297 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7298 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7299diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7300index 0abdd4c..1af92f0 100644
7301--- a/arch/parisc/include/asm/uaccess.h
7302+++ b/arch/parisc/include/asm/uaccess.h
7303@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7304 const void __user *from,
7305 unsigned long n)
7306 {
7307- int sz = __compiletime_object_size(to);
7308+ size_t sz = __compiletime_object_size(to);
7309 int ret = -EFAULT;
7310
7311- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7312+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7313 ret = __copy_from_user(to, from, n);
7314 else
7315 copy_from_user_overflow();
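
__compiletime_object_size() returns (size_t)-1 when the object size cannot be determined. Storing that in a plain int worked only by accident: a genuine object size just above INT_MAX would truncate or go negative in the int, defeating the sz >= n guard, and the narrowing conversion itself is implementation-defined. Widening sz to size_t and comparing against (size_t)-1 keeps both the sentinel and large sizes intact. A toy LP64 demonstration of the truncation (implementation-defined behaviour, commonly as shown):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t obj = ((size_t)1 << 32) + 8;  /* object bigger than UINT_MAX */
        int sz = (int)obj;                   /* old code's type: commonly 8 */
        size_t n = 64;

        printf("int guard:    sz >= n ? %d  (sz = %d)\n", sz >= (int)n, sz);
        printf("size_t guard: sz >= n ? %d\n", obj >= n);
        return 0;
    }
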
7316diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7317index 3c63a82..b1d6ee9 100644
7318--- a/arch/parisc/kernel/module.c
7319+++ b/arch/parisc/kernel/module.c
7320@@ -98,16 +98,38 @@
7321
7322 /* three functions to determine where in the module core
7323 * or init pieces the location is */
7324+static inline int in_init_rx(struct module *me, void *loc)
7325+{
7326+ return (loc >= me->module_init_rx &&
7327+ loc < (me->module_init_rx + me->init_size_rx));
7328+}
7329+
7330+static inline int in_init_rw(struct module *me, void *loc)
7331+{
7332+ return (loc >= me->module_init_rw &&
7333+ loc < (me->module_init_rw + me->init_size_rw));
7334+}
7335+
7336 static inline int in_init(struct module *me, void *loc)
7337 {
7338- return (loc >= me->module_init &&
7339- loc <= (me->module_init + me->init_size));
7340+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7341+}
7342+
7343+static inline int in_core_rx(struct module *me, void *loc)
7344+{
7345+ return (loc >= me->module_core_rx &&
7346+ loc < (me->module_core_rx + me->core_size_rx));
7347+}
7348+
7349+static inline int in_core_rw(struct module *me, void *loc)
7350+{
7351+ return (loc >= me->module_core_rw &&
7352+ loc < (me->module_core_rw + me->core_size_rw));
7353 }
7354
7355 static inline int in_core(struct module *me, void *loc)
7356 {
7357- return (loc >= me->module_core &&
7358- loc <= (me->module_core + me->core_size));
7359+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7360 }
7361
7362 static inline int in_local(struct module *me, void *loc)
7363@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7364 }
7365
7366 /* align things a bit */
7367- me->core_size = ALIGN(me->core_size, 16);
7368- me->arch.got_offset = me->core_size;
7369- me->core_size += gots * sizeof(struct got_entry);
7370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7371+ me->arch.got_offset = me->core_size_rw;
7372+ me->core_size_rw += gots * sizeof(struct got_entry);
7373
7374- me->core_size = ALIGN(me->core_size, 16);
7375- me->arch.fdesc_offset = me->core_size;
7376- me->core_size += fdescs * sizeof(Elf_Fdesc);
7377+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7378+ me->arch.fdesc_offset = me->core_size_rw;
7379+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7380
7381 me->arch.got_max = gots;
7382 me->arch.fdesc_max = fdescs;
7383@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7384
7385 BUG_ON(value == 0);
7386
7387- got = me->module_core + me->arch.got_offset;
7388+ got = me->module_core_rw + me->arch.got_offset;
7389 for (i = 0; got[i].addr; i++)
7390 if (got[i].addr == value)
7391 goto out;
7392@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7393 #ifdef CONFIG_64BIT
7394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7395 {
7396- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7397+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7398
7399 if (!value) {
7400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7401@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7402
7403 /* Create new one */
7404 fdesc->addr = value;
7405- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7406+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7407 return (Elf_Addr)fdesc;
7408 }
7409 #endif /* CONFIG_64BIT */
7410@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7411
7412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7413 end = table + sechdrs[me->arch.unwind_section].sh_size;
7414- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7415+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7416
7417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7418 me->arch.unwind_section, table, end, gp);
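
With PaX module protection each module image is split into an executable read-only half (module_core_rx/module_init_rx) and a writable non-executable half (the _rw fields); parisc's GOT and function descriptors are data, so all got_offset/fdesc_offset bookkeeping moves to core_size_rw, and in_init()/in_core() become unions of the per-region range checks. The rewrite also tightens the old off-by-one 'loc <= base + size' to a strict '<'. A simplified, compilable shape of the membership test:

    #include <stdio.h>

    struct mod_sketch {
        char *core_rx; unsigned long size_rx;   /* text: r-x       */
        char *core_rw; unsigned long size_rw;   /* data + GOT: rw- */
    };

    static int in_range(const char *base, unsigned long size, const void *loc)
    {
        const char *p = loc;
        return p >= base && p < base + size;    /* strict <, as above */
    }

    static int in_core(const struct mod_sketch *m, const void *loc)
    {
        return in_range(m->core_rx, m->size_rx, loc) ||
               in_range(m->core_rw, m->size_rw, loc);
    }

    int main(void)
    {
        static char text[64], data[64];
        struct mod_sketch m = { text, sizeof(text), data, sizeof(data) };

        printf("in text:  %d\n", in_core(&m, text + 8));   /* 1 */
        printf("in data:  %d\n", in_core(&m, data + 8));   /* 1 */
        printf("past end: %d\n", in_core(&m, text + 64));  /* 0: strict < */
        return 0;
    }
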
7419diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7420index e1ffea2..46ed66e 100644
7421--- a/arch/parisc/kernel/sys_parisc.c
7422+++ b/arch/parisc/kernel/sys_parisc.c
7423@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7424 unsigned long task_size = TASK_SIZE;
7425 int do_color_align, last_mmap;
7426 struct vm_unmapped_area_info info;
7427+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7428
7429 if (len > task_size)
7430 return -ENOMEM;
7431@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 goto found_addr;
7433 }
7434
7435+#ifdef CONFIG_PAX_RANDMMAP
7436+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7437+#endif
7438+
7439 if (addr) {
7440 if (do_color_align && last_mmap)
7441 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7442@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7443 info.high_limit = mmap_upper_limit();
7444 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7445 info.align_offset = shared_align_offset(last_mmap, pgoff);
7446+ info.threadstack_offset = offset;
7447 addr = vm_unmapped_area(&info);
7448
7449 found_addr:
7450@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7451 unsigned long addr = addr0;
7452 int do_color_align, last_mmap;
7453 struct vm_unmapped_area_info info;
7454+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7455
7456 #ifdef CONFIG_64BIT
7457 /* This should only ever run for 32-bit processes. */
7458@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 }
7460
7461 /* requesting a specific address */
7462+#ifdef CONFIG_PAX_RANDMMAP
7463+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7464+#endif
7465+
7466 if (addr) {
7467 if (do_color_align && last_mmap)
7468 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7469@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7470 info.high_limit = mm->mmap_base;
7471 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7472 info.align_offset = shared_align_offset(last_mmap, pgoff);
7473+ info.threadstack_offset = offset;
7474 addr = vm_unmapped_area(&info);
7475 if (!(addr & ~PAGE_MASK))
7476 goto found_addr;
7477@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7478 mm->mmap_legacy_base = mmap_legacy_base();
7479 mm->mmap_base = mmap_upper_limit();
7480
7481+#ifdef CONFIG_PAX_RANDMMAP
7482+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7483+ mm->mmap_legacy_base += mm->delta_mmap;
7484+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7485+ }
7486+#endif
7487+
7488 if (mmap_is_legacy()) {
7489 mm->mmap_base = mm->mmap_legacy_base;
7490 mm->get_unmapped_area = arch_get_unmapped_area;
7491diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7492index 47ee620..1107387 100644
7493--- a/arch/parisc/kernel/traps.c
7494+++ b/arch/parisc/kernel/traps.c
7495@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7496
7497 down_read(&current->mm->mmap_sem);
7498 vma = find_vma(current->mm,regs->iaoq[0]);
7499- if (vma && (regs->iaoq[0] >= vma->vm_start)
7500- && (vma->vm_flags & VM_EXEC)) {
7501-
7502+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7503 fault_address = regs->iaoq[0];
7504 fault_space = regs->iasq[0];
7505
7506diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7507index e5120e6..8ddb5cc 100644
7508--- a/arch/parisc/mm/fault.c
7509+++ b/arch/parisc/mm/fault.c
7510@@ -15,6 +15,7 @@
7511 #include <linux/sched.h>
7512 #include <linux/interrupt.h>
7513 #include <linux/module.h>
7514+#include <linux/unistd.h>
7515
7516 #include <asm/uaccess.h>
7517 #include <asm/traps.h>
7518@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7519 static unsigned long
7520 parisc_acctyp(unsigned long code, unsigned int inst)
7521 {
7522- if (code == 6 || code == 16)
7523+ if (code == 6 || code == 7 || code == 16)
7524 return VM_EXEC;
7525
7526 switch (inst & 0xf0000000) {
7527@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7528 }
7529 #endif
7530
7531+#ifdef CONFIG_PAX_PAGEEXEC
7532+/*
7533+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7534+ *
7535+ * returns 1 when task should be killed
7536+ * 2 when rt_sigreturn trampoline was detected
7537+ * 3 when unpatched PLT trampoline was detected
7538+ */
7539+static int pax_handle_fetch_fault(struct pt_regs *regs)
7540+{
7541+
7542+#ifdef CONFIG_PAX_EMUPLT
7543+ int err;
7544+
7545+ do { /* PaX: unpatched PLT emulation */
7546+ unsigned int bl, depwi;
7547+
7548+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7549+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7550+
7551+ if (err)
7552+ break;
7553+
7554+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7555+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7556+
7557+ err = get_user(ldw, (unsigned int *)addr);
7558+ err |= get_user(bv, (unsigned int *)(addr+4));
7559+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7560+
7561+ if (err)
7562+ break;
7563+
7564+ if (ldw == 0x0E801096U &&
7565+ bv == 0xEAC0C000U &&
7566+ ldw2 == 0x0E881095U)
7567+ {
7568+ unsigned int resolver, map;
7569+
7570+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7571+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7572+ if (err)
7573+ break;
7574+
7575+ regs->gr[20] = instruction_pointer(regs)+8;
7576+ regs->gr[21] = map;
7577+ regs->gr[22] = resolver;
7578+ regs->iaoq[0] = resolver | 3UL;
7579+ regs->iaoq[1] = regs->iaoq[0] + 4;
7580+ return 3;
7581+ }
7582+ }
7583+ } while (0);
7584+#endif
7585+
7586+#ifdef CONFIG_PAX_EMUTRAMP
7587+
7588+#ifndef CONFIG_PAX_EMUSIGRT
7589+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7590+ return 1;
7591+#endif
7592+
7593+ do { /* PaX: rt_sigreturn emulation */
7594+ unsigned int ldi1, ldi2, bel, nop;
7595+
7596+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7597+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7598+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7599+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7600+
7601+ if (err)
7602+ break;
7603+
7604+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7605+ ldi2 == 0x3414015AU &&
7606+ bel == 0xE4008200U &&
7607+ nop == 0x08000240U)
7608+ {
7609+ regs->gr[25] = (ldi1 & 2) >> 1;
7610+ regs->gr[20] = __NR_rt_sigreturn;
7611+ regs->gr[31] = regs->iaoq[1] + 16;
7612+ regs->sr[0] = regs->iasq[1];
7613+ regs->iaoq[0] = 0x100UL;
7614+ regs->iaoq[1] = regs->iaoq[0] + 4;
7615+ regs->iasq[0] = regs->sr[2];
7616+ regs->iasq[1] = regs->sr[2];
7617+ return 2;
7618+ }
7619+ } while (0);
7620+#endif
7621+
7622+ return 1;
7623+}
7624+
7625+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7626+{
7627+ unsigned long i;
7628+
7629+ printk(KERN_ERR "PAX: bytes at PC: ");
7630+ for (i = 0; i < 5; i++) {
7631+ unsigned int c;
7632+ if (get_user(c, (unsigned int *)pc+i))
7633+ printk(KERN_CONT "???????? ");
7634+ else
7635+ printk(KERN_CONT "%08x ", c);
7636+ }
7637+ printk("\n");
7638+}
7639+#endif
7640+
7641 int fixup_exception(struct pt_regs *regs)
7642 {
7643 const struct exception_table_entry *fix;
7644@@ -234,8 +345,33 @@ retry:
7645
7646 good_area:
7647
7648- if ((vma->vm_flags & acc_type) != acc_type)
7649+ if ((vma->vm_flags & acc_type) != acc_type) {
7650+
7651+#ifdef CONFIG_PAX_PAGEEXEC
7652+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7653+ (address & ~3UL) == instruction_pointer(regs))
7654+ {
7655+ up_read(&mm->mmap_sem);
7656+ switch (pax_handle_fetch_fault(regs)) {
7657+
7658+#ifdef CONFIG_PAX_EMUPLT
7659+ case 3:
7660+ return;
7661+#endif
7662+
7663+#ifdef CONFIG_PAX_EMUTRAMP
7664+ case 2:
7665+ return;
7666+#endif
7667+
7668+ }
7669+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7670+ do_group_exit(SIGKILL);
7671+ }
7672+#endif
7673+
7674 goto bad_area;
7675+ }
7676
7677 /*
7678 * If for any reason at all we couldn't handle the fault, make
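
With PAGEEXEC, an instruction fetch from a non-executable page lands in this handler, and pax_handle_fetch_fault() decides whether the fetch matches one of two known-benign patterns: an unpatched PLT slot, for which it hand-crafts the resolver call and returns 3, or the rt_sigreturn trampoline, which it emulates and returns 2; anything else kills the task. Recognition is nothing more than get_user() word reads compared against fixed opcodes. A minimal demo of that matching step, reusing the two PLT opcodes from the code above:

    #include <stdio.h>
    #include <stdint.h>

    #define PLT_WORD0 0xEA9F1FDDu   /* bl, from the code above */
    #define PLT_WORD1 0xD6801C1Eu   /* depwi                   */

    static int is_unpatched_plt(const uint32_t *pc)
    {
        return pc[0] == PLT_WORD0 && pc[1] == PLT_WORD1;
    }

    int main(void)
    {
        uint32_t stub[2] = { PLT_WORD0, PLT_WORD1 };
        printf("PLT stub? %s\n", is_unpatched_plt(stub) ? "yes" : "no");
        return 0;
    }
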
7679diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7680index 22b0940..309f790 100644
7681--- a/arch/powerpc/Kconfig
7682+++ b/arch/powerpc/Kconfig
7683@@ -409,6 +409,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7684 config KEXEC
7685 bool "kexec system call"
7686 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7687+ depends on !GRKERNSEC_KMEM
7688 help
7689 kexec is a system call that implements the ability to shutdown your
7690 current kernel, and to start another kernel. It is like a reboot
7691diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7692index 512d278..d31fadd 100644
7693--- a/arch/powerpc/include/asm/atomic.h
7694+++ b/arch/powerpc/include/asm/atomic.h
7695@@ -12,6 +12,11 @@
7696
7697 #define ATOMIC_INIT(i) { (i) }
7698
7699+#define _ASM_EXTABLE(from, to) \
7700+" .section __ex_table,\"a\"\n" \
7701+ PPC_LONG" " #from ", " #to"\n" \
7702+" .previous\n"
7703+
7704 static __inline__ int atomic_read(const atomic_t *v)
7705 {
7706 int t;
7707@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7708 return t;
7709 }
7710
7711+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7712+{
7713+ int t;
7714+
7715+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7716+
7717+ return t;
7718+}
7719+
7720 static __inline__ void atomic_set(atomic_t *v, int i)
7721 {
7722 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7723 }
7724
7725-#define ATOMIC_OP(op, asm_op) \
7726-static __inline__ void atomic_##op(int a, atomic_t *v) \
7727+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7728+{
7729+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7730+}
7731+
7732+#ifdef CONFIG_PAX_REFCOUNT
7733+#define __REFCOUNT_OP(op) op##o.
7734+#define __OVERFLOW_PRE \
7735+ " mcrxr cr0\n"
7736+#define __OVERFLOW_POST \
7737+ " bf 4*cr0+so, 3f\n" \
7738+ "2: .long 0x00c00b00\n" \
7739+ "3:\n"
7740+#define __OVERFLOW_EXTABLE \
7741+ "\n4:\n"
7742+ _ASM_EXTABLE(2b, 4b)
7743+#else
7744+#define __REFCOUNT_OP(op) op
7745+#define __OVERFLOW_PRE
7746+#define __OVERFLOW_POST
7747+#define __OVERFLOW_EXTABLE
7748+#endif
7749+
7750+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7751+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7752 { \
7753 int t; \
7754 \
7755 __asm__ __volatile__( \
7756-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7757+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7758+ pre_op \
7759 #asm_op " %0,%2,%0\n" \
7760+ post_op \
7761 PPC405_ERR77(0,%3) \
7762 " stwcx. %0,0,%3 \n" \
7763 " bne- 1b\n" \
7764+ extable \
7765 : "=&r" (t), "+m" (v->counter) \
7766 : "r" (a), "r" (&v->counter) \
7767 : "cc"); \
7768 } \
7769
7770-#define ATOMIC_OP_RETURN(op, asm_op) \
7771-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7772+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7773+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7774+
7775+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7776+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7777 { \
7778 int t; \
7779 \
7780 __asm__ __volatile__( \
7781 PPC_ATOMIC_ENTRY_BARRIER \
7782-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7783+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7784+ pre_op \
7785 #asm_op " %0,%1,%0\n" \
7786+ post_op \
7787 PPC405_ERR77(0,%2) \
7788 " stwcx. %0,0,%2 \n" \
7789 " bne- 1b\n" \
7790+ extable \
7791 PPC_ATOMIC_EXIT_BARRIER \
7792 : "=&r" (t) \
7793 : "r" (a), "r" (&v->counter) \
7794@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7795 return t; \
7796 }
7797
7798+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7799+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7800+
7801 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7802
7803 ATOMIC_OPS(add, add)
7804@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7805
7806 #undef ATOMIC_OPS
7807 #undef ATOMIC_OP_RETURN
7808+#undef __ATOMIC_OP_RETURN
7809 #undef ATOMIC_OP
7810+#undef __ATOMIC_OP
7811
7812 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7813
7814-static __inline__ void atomic_inc(atomic_t *v)
7815-{
7816- int t;
7817+/*
7818+ * atomic_inc - increment atomic variable
7819+ * @v: pointer of type atomic_t
7820+ *
7821+ * Automatically increments @v by 1
7822+ */
7823+#define atomic_inc(v) atomic_add(1, (v))
7824+#define atomic_inc_return(v) atomic_add_return(1, (v))
7825
7826- __asm__ __volatile__(
7827-"1: lwarx %0,0,%2 # atomic_inc\n\
7828- addic %0,%0,1\n"
7829- PPC405_ERR77(0,%2)
7830-" stwcx. %0,0,%2 \n\
7831- bne- 1b"
7832- : "=&r" (t), "+m" (v->counter)
7833- : "r" (&v->counter)
7834- : "cc", "xer");
7835+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7836+{
7837+ atomic_add_unchecked(1, v);
7838 }
7839
7840-static __inline__ int atomic_inc_return(atomic_t *v)
7841+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7842 {
7843- int t;
7844-
7845- __asm__ __volatile__(
7846- PPC_ATOMIC_ENTRY_BARRIER
7847-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7848- addic %0,%0,1\n"
7849- PPC405_ERR77(0,%1)
7850-" stwcx. %0,0,%1 \n\
7851- bne- 1b"
7852- PPC_ATOMIC_EXIT_BARRIER
7853- : "=&r" (t)
7854- : "r" (&v->counter)
7855- : "cc", "xer", "memory");
7856-
7857- return t;
7858+ return atomic_add_return_unchecked(1, v);
7859 }
7860
7861 /*
7862@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7863 */
7864 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7865
7866-static __inline__ void atomic_dec(atomic_t *v)
7867+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7868 {
7869- int t;
7870-
7871- __asm__ __volatile__(
7872-"1: lwarx %0,0,%2 # atomic_dec\n\
7873- addic %0,%0,-1\n"
7874- PPC405_ERR77(0,%2)\
7875-" stwcx. %0,0,%2\n\
7876- bne- 1b"
7877- : "=&r" (t), "+m" (v->counter)
7878- : "r" (&v->counter)
7879- : "cc", "xer");
7880+ return atomic_add_return_unchecked(1, v) == 0;
7881 }
7882
7883-static __inline__ int atomic_dec_return(atomic_t *v)
7884+/*
7885+ * atomic_dec - decrement atomic variable
7886+ * @v: pointer of type atomic_t
7887+ *
7888+ * Atomically decrements @v by 1
7889+ */
7890+#define atomic_dec(v) atomic_sub(1, (v))
7891+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7892+
7893+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7894 {
7895- int t;
7896-
7897- __asm__ __volatile__(
7898- PPC_ATOMIC_ENTRY_BARRIER
7899-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7900- addic %0,%0,-1\n"
7901- PPC405_ERR77(0,%1)
7902-" stwcx. %0,0,%1\n\
7903- bne- 1b"
7904- PPC_ATOMIC_EXIT_BARRIER
7905- : "=&r" (t)
7906- : "r" (&v->counter)
7907- : "cc", "xer", "memory");
7908-
7909- return t;
7910+ atomic_sub_unchecked(1, v);
7911 }
7912
7913 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7915
7916+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7917+{
7918+ return cmpxchg(&(v->counter), old, new);
7919+}
7920+
7921+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7922+{
7923+ return xchg(&(v->counter), new);
7924+}
7925+
7926 /**
7927 * __atomic_add_unless - add unless the number is a given value
7928 * @v: pointer of type atomic_t
7929@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7930 PPC_ATOMIC_ENTRY_BARRIER
7931 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7932 cmpw 0,%0,%3 \n\
7933- beq- 2f \n\
7934- add %0,%2,%0 \n"
7935+ beq- 2f \n"
7936+
7937+#ifdef CONFIG_PAX_REFCOUNT
7938+" mcrxr cr0\n"
7939+" addo. %0,%2,%0\n"
7940+" bf 4*cr0+so, 4f\n"
7941+"3:.long " "0x00c00b00""\n"
7942+"4:\n"
7943+#else
7944+ "add %0,%2,%0 \n"
7945+#endif
7946+
7947 PPC405_ERR77(0,%2)
7948 " stwcx. %0,0,%1 \n\
7949 bne- 1b \n"
7950+"5:"
7951+
7952+#ifdef CONFIG_PAX_REFCOUNT
7953+ _ASM_EXTABLE(3b, 5b)
7954+#endif
7955+
7956 PPC_ATOMIC_EXIT_BARRIER
7957 " subf %0,%2,%0 \n\
7958 2:"
7959@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7960 }
7961 #define atomic_dec_if_positive atomic_dec_if_positive
7962
7963+#define smp_mb__before_atomic_dec() smp_mb()
7964+#define smp_mb__after_atomic_dec() smp_mb()
7965+#define smp_mb__before_atomic_inc() smp_mb()
7966+#define smp_mb__after_atomic_inc() smp_mb()
7967+
7968 #ifdef __powerpc64__
7969
7970 #define ATOMIC64_INIT(i) { (i) }
7971@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7972 return t;
7973 }
7974
7975+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7976+{
7977+ long t;
7978+
7979+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7980+
7981+ return t;
7982+}
7983+
7984 static __inline__ void atomic64_set(atomic64_t *v, long i)
7985 {
7986 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7987 }
7988
7989-#define ATOMIC64_OP(op, asm_op) \
7990-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7991+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7992+{
7993+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7994+}
7995+
7996+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7997+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
7998 { \
7999 long t; \
8000 \
8001 __asm__ __volatile__( \
8002 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8003+ pre_op \
8004 #asm_op " %0,%2,%0\n" \
8005+ post_op \
8006 " stdcx. %0,0,%3 \n" \
8007 " bne- 1b\n" \
8008+ extable \
8009 : "=&r" (t), "+m" (v->counter) \
8010 : "r" (a), "r" (&v->counter) \
8011 : "cc"); \
8012 }
8013
8014-#define ATOMIC64_OP_RETURN(op, asm_op) \
8015-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8016+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8017+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8018+
8019+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8020+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8021 { \
8022 long t; \
8023 \
8024 __asm__ __volatile__( \
8025 PPC_ATOMIC_ENTRY_BARRIER \
8026 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8027+ pre_op \
8028 #asm_op " %0,%1,%0\n" \
8029+ post_op \
8030 " stdcx. %0,0,%2 \n" \
8031 " bne- 1b\n" \
8032+ extable \
8033 PPC_ATOMIC_EXIT_BARRIER \
8034 : "=&r" (t) \
8035 : "r" (a), "r" (&v->counter) \
8036@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8037 return t; \
8038 }
8039
8040+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8041+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8042+
8043 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8044
8045 ATOMIC64_OPS(add, add)
8046@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8047
8048 #undef ATOMIC64_OPS
8049 #undef ATOMIC64_OP_RETURN
8050+#undef __ATOMIC64_OP_RETURN
8051 #undef ATOMIC64_OP
8052+#undef __ATOMIC64_OP
8053+#undef __OVERFLOW_EXTABLE
8054+#undef __OVERFLOW_POST
8055+#undef __OVERFLOW_PRE
8056+#undef __REFCOUNT_OP
8057
8058 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8059
8060-static __inline__ void atomic64_inc(atomic64_t *v)
8061-{
8062- long t;
8063+/*
8064+ * atomic64_inc - increment atomic variable
8065+ * @v: pointer of type atomic64_t
8066+ *
8067+ * Atomically increments @v by 1
8068+ */
8069+#define atomic64_inc(v) atomic64_add(1, (v))
8070+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8071
8072- __asm__ __volatile__(
8073-"1: ldarx %0,0,%2 # atomic64_inc\n\
8074- addic %0,%0,1\n\
8075- stdcx. %0,0,%2 \n\
8076- bne- 1b"
8077- : "=&r" (t), "+m" (v->counter)
8078- : "r" (&v->counter)
8079- : "cc", "xer");
8080+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8081+{
8082+ atomic64_add_unchecked(1, v);
8083 }
8084
8085-static __inline__ long atomic64_inc_return(atomic64_t *v)
8086+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8087 {
8088- long t;
8089-
8090- __asm__ __volatile__(
8091- PPC_ATOMIC_ENTRY_BARRIER
8092-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8093- addic %0,%0,1\n\
8094- stdcx. %0,0,%1 \n\
8095- bne- 1b"
8096- PPC_ATOMIC_EXIT_BARRIER
8097- : "=&r" (t)
8098- : "r" (&v->counter)
8099- : "cc", "xer", "memory");
8100-
8101- return t;
8102+ return atomic64_add_return_unchecked(1, v);
8103 }
8104
8105 /*
8106@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8107 */
8108 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8109
8110-static __inline__ void atomic64_dec(atomic64_t *v)
8111+/*
8112+ * atomic64_dec - decrement atomic variable
8113+ * @v: pointer of type atomic64_t
8114+ *
8115+ * Atomically decrements @v by 1
8116+ */
8117+#define atomic64_dec(v) atomic64_sub(1, (v))
8118+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8119+
8120+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8121 {
8122- long t;
8123-
8124- __asm__ __volatile__(
8125-"1: ldarx %0,0,%2 # atomic64_dec\n\
8126- addic %0,%0,-1\n\
8127- stdcx. %0,0,%2\n\
8128- bne- 1b"
8129- : "=&r" (t), "+m" (v->counter)
8130- : "r" (&v->counter)
8131- : "cc", "xer");
8132-}
8133-
8134-static __inline__ long atomic64_dec_return(atomic64_t *v)
8135-{
8136- long t;
8137-
8138- __asm__ __volatile__(
8139- PPC_ATOMIC_ENTRY_BARRIER
8140-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8141- addic %0,%0,-1\n\
8142- stdcx. %0,0,%1\n\
8143- bne- 1b"
8144- PPC_ATOMIC_EXIT_BARRIER
8145- : "=&r" (t)
8146- : "r" (&v->counter)
8147- : "cc", "xer", "memory");
8148-
8149- return t;
8150+ atomic64_sub_unchecked(1, v);
8151 }
8152
8153 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8154@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8155 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8156 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8157
8158+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8159+{
8160+ return cmpxchg(&(v->counter), old, new);
8161+}
8162+
8163+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8164+{
8165+ return xchg(&(v->counter), new);
8166+}
8167+
8168 /**
8169 * atomic64_add_unless - add unless the number is a given value
8170 * @v: pointer of type atomic64_t
8171@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8172
8173 __asm__ __volatile__ (
8174 PPC_ATOMIC_ENTRY_BARRIER
8175-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8176+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8177 cmpd 0,%0,%3 \n\
8178- beq- 2f \n\
8179- add %0,%2,%0 \n"
8180+ beq- 2f \n"
8181+
8182+#ifdef CONFIG_PAX_REFCOUNT
8183+" mcrxr cr0\n"
8184+" addo. %0,%2,%0\n"
8185+" bf 4*cr0+so, 4f\n"
8186+"3:.long " "0x00c00b00""\n"
8187+"4:\n"
8188+#else
8189+ "add %0,%2,%0 \n"
8190+#endif
8191+
8192 " stdcx. %0,0,%1 \n\
8193 bne- 1b \n"
8194 PPC_ATOMIC_EXIT_BARRIER
8195+"5:"
8196+
8197+#ifdef CONFIG_PAX_REFCOUNT
8198+ _ASM_EXTABLE(3b, 5b)
8199+#endif
8200+
8201 " subf %0,%2,%0 \n\
8202 2:"
8203 : "=&r" (t)
8204diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8205index a3bf5be..e03ba81 100644
8206--- a/arch/powerpc/include/asm/barrier.h
8207+++ b/arch/powerpc/include/asm/barrier.h
8208@@ -76,7 +76,7 @@
8209 do { \
8210 compiletime_assert_atomic_type(*p); \
8211 smp_lwsync(); \
8212- ACCESS_ONCE(*p) = (v); \
8213+ ACCESS_ONCE_RW(*p) = (v); \
8214 } while (0)
8215
8216 #define smp_load_acquire(p) \
8217diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8218index 34a05a1..a1f2c67 100644
8219--- a/arch/powerpc/include/asm/cache.h
8220+++ b/arch/powerpc/include/asm/cache.h
8221@@ -4,6 +4,7 @@
8222 #ifdef __KERNEL__
8223
8224 #include <asm/reg.h>
8225+#include <linux/const.h>
8226
8227 /* bytes per L1 cache line */
8228 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8229@@ -23,7 +24,7 @@
8230 #define L1_CACHE_SHIFT 7
8231 #endif
8232
8233-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8234+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8235
8236 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8237
8238diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8239index 57d289a..b36c98c 100644
8240--- a/arch/powerpc/include/asm/elf.h
8241+++ b/arch/powerpc/include/asm/elf.h
8242@@ -30,6 +30,18 @@
8243
8244 #define ELF_ET_DYN_BASE 0x20000000
8245
8246+#ifdef CONFIG_PAX_ASLR
8247+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8248+
8249+#ifdef __powerpc64__
8250+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8251+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8252+#else
8253+#define PAX_DELTA_MMAP_LEN 15
8254+#define PAX_DELTA_STACK_LEN 15
8255+#endif
8256+#endif
8257+
8258 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8259
8260 /*
8261@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8262 (0x7ff >> (PAGE_SHIFT - 12)) : \
8263 (0x3ffff >> (PAGE_SHIFT - 12)))
8264
8265-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8266-#define arch_randomize_brk arch_randomize_brk
8267-
8268-
8269 #ifdef CONFIG_SPU_BASE
8270 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8271 #define NT_SPU 1
8272diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8273index 8196e9c..d83a9f3 100644
8274--- a/arch/powerpc/include/asm/exec.h
8275+++ b/arch/powerpc/include/asm/exec.h
8276@@ -4,6 +4,6 @@
8277 #ifndef _ASM_POWERPC_EXEC_H
8278 #define _ASM_POWERPC_EXEC_H
8279
8280-extern unsigned long arch_align_stack(unsigned long sp);
8281+#define arch_align_stack(x) ((x) & ~0xfUL)
8282
8283 #endif /* _ASM_POWERPC_EXEC_H */
8284diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8285index 5acabbd..7ea14fa 100644
8286--- a/arch/powerpc/include/asm/kmap_types.h
8287+++ b/arch/powerpc/include/asm/kmap_types.h
8288@@ -10,7 +10,7 @@
8289 * 2 of the License, or (at your option) any later version.
8290 */
8291
8292-#define KM_TYPE_NR 16
8293+#define KM_TYPE_NR 17
8294
8295 #endif /* __KERNEL__ */
8296 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8297diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8298index b8da913..c02b593 100644
8299--- a/arch/powerpc/include/asm/local.h
8300+++ b/arch/powerpc/include/asm/local.h
8301@@ -9,21 +9,65 @@ typedef struct
8302 atomic_long_t a;
8303 } local_t;
8304
8305+typedef struct
8306+{
8307+ atomic_long_unchecked_t a;
8308+} local_unchecked_t;
8309+
8310 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8311
8312 #define local_read(l) atomic_long_read(&(l)->a)
8313+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8314 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8315+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8316
8317 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8318+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8319 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8320+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8321 #define local_inc(l) atomic_long_inc(&(l)->a)
8322+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8323 #define local_dec(l) atomic_long_dec(&(l)->a)
8324+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8325
8326 static __inline__ long local_add_return(long a, local_t *l)
8327 {
8328 long t;
8329
8330 __asm__ __volatile__(
8331+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8332+
8333+#ifdef CONFIG_PAX_REFCOUNT
8334+" mcrxr cr0\n"
8335+" addo. %0,%1,%0\n"
8336+" bf 4*cr0+so, 3f\n"
8337+"2:.long " "0x00c00b00""\n"
8338+#else
8339+" add %0,%1,%0\n"
8340+#endif
8341+
8342+"3:\n"
8343+ PPC405_ERR77(0,%2)
8344+ PPC_STLCX "%0,0,%2 \n\
8345+ bne- 1b"
8346+
8347+#ifdef CONFIG_PAX_REFCOUNT
8348+"\n4:\n"
8349+ _ASM_EXTABLE(2b, 4b)
8350+#endif
8351+
8352+ : "=&r" (t)
8353+ : "r" (a), "r" (&(l->a.counter))
8354+ : "cc", "memory");
8355+
8356+ return t;
8357+}
8358+
8359+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8360+{
8361+ long t;
8362+
8363+ __asm__ __volatile__(
8364 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8365 add %0,%1,%0\n"
8366 PPC405_ERR77(0,%2)
8367@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8368
8369 #define local_cmpxchg(l, o, n) \
8370 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8371+#define local_cmpxchg_unchecked(l, o, n) \
8372+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8373 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8374
8375 /**
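
Editor's note: the asm added to local_add_return() is the canonical PaX REFCOUNT shape: clear the stale summary-overflow bit with mcrxr, do the add as addo. so a wrap is recorded, branch past an embedded 0x00c00b00 word when no overflow occurred, and register that word in the exception table so the program-check handler can resume past the skipped store. As plain C control flow it is roughly the sketch below; the names are illustrative, and the real code keeps everything inside one lwarx/stwcx. loop:

    /* Pseudo-C rendition of the overflow-trapping local_add_return().
     * __builtin_add_overflow stands in for mcrxr + addo. + bf, and the
     * early return stands in for the extable jump from "2:" to "4:". */
    static long local_add_return_sketch(long a, long *counter)
    {
        long old, new;

        do {
            old = *counter;                           /* 1: PPC_LLARX   */
            if (__builtin_add_overflow(old, a, &new))
                return old;     /* 2: trap word fires; store is skipped */
        } while (!__sync_bool_compare_and_swap(counter, old, new));
                                                      /* stwcx. / bne-  */
        return new;
    }
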
8376diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8377index 8565c25..2865190 100644
8378--- a/arch/powerpc/include/asm/mman.h
8379+++ b/arch/powerpc/include/asm/mman.h
8380@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8381 }
8382 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8383
8384-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8385+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8386 {
8387 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8388 }
8389diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8390index 69c0598..2c56964 100644
8391--- a/arch/powerpc/include/asm/page.h
8392+++ b/arch/powerpc/include/asm/page.h
8393@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8394 * and needs to be executable. This means the whole heap ends
8395 * up being executable.
8396 */
8397-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8398- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8399+#define VM_DATA_DEFAULT_FLAGS32 \
8400+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8401+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8402
8403 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8404 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8405@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8406 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8407 #endif
8408
8409+#define ktla_ktva(addr) (addr)
8410+#define ktva_ktla(addr) (addr)
8411+
8412 #ifndef CONFIG_PPC_BOOK3S_64
8413 /*
8414 * Use the top bit of the higher-level page table entries to indicate whether
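
Editor's note: after the page.h hunk, 32-bit data mappings get VM_EXEC only when the ELF loader set READ_IMPLIES_EXEC in the personality, instead of unconditionally; VM_MAYEXEC stays, so mprotect(PROT_EXEC) still works where policy allows it. A small stand-alone illustration of the resulting flag word (flag values match the kernel's):

    #include <stdio.h>

    #define VM_READ           0x01UL
    #define VM_WRITE          0x02UL
    #define VM_EXEC           0x04UL
    #define VM_MAYREAD        0x10UL
    #define VM_MAYWRITE       0x20UL
    #define VM_MAYEXEC        0x40UL
    #define READ_IMPLIES_EXEC 0x0400000  /* from <linux/personality.h> */

    static unsigned long vm_data_default_flags32(unsigned int personality)
    {
        return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
               VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
    }

    int main(void)
    {
        printf("default personality: %#lx\n", vm_data_default_flags32(0));
        printf("READ_IMPLIES_EXEC:   %#lx\n",
               vm_data_default_flags32(READ_IMPLIES_EXEC));
        return 0;
    }
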
8415diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8416index d908a46..3753f71 100644
8417--- a/arch/powerpc/include/asm/page_64.h
8418+++ b/arch/powerpc/include/asm/page_64.h
8419@@ -172,15 +172,18 @@ do { \
8420 * stack by default, so in the absence of a PT_GNU_STACK program header
8421 * we turn execute permission off.
8422 */
8423-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8424- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8425+#define VM_STACK_DEFAULT_FLAGS32 \
8426+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8427+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8428
8429 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8430 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8431
8432+#ifndef CONFIG_PAX_PAGEEXEC
8433 #define VM_STACK_DEFAULT_FLAGS \
8434 (is_32bit_task() ? \
8435 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8436+#endif
8437
8438 #include <asm-generic/getorder.h>
8439
8440diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8441index 4b0be20..c15a27d 100644
8442--- a/arch/powerpc/include/asm/pgalloc-64.h
8443+++ b/arch/powerpc/include/asm/pgalloc-64.h
8444@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8445 #ifndef CONFIG_PPC_64K_PAGES
8446
8447 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8448+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8449
8450 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8451 {
8452@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8453 pud_set(pud, (unsigned long)pmd);
8454 }
8455
8456+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8457+{
8458+ pud_populate(mm, pud, pmd);
8459+}
8460+
8461 #define pmd_populate(mm, pmd, pte_page) \
8462 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8463 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8464@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8465 #endif
8466
8467 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8468+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8469
8470 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8471 pte_t *pte)
8472diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8473index 9835ac4..900430f 100644
8474--- a/arch/powerpc/include/asm/pgtable.h
8475+++ b/arch/powerpc/include/asm/pgtable.h
8476@@ -2,6 +2,7 @@
8477 #define _ASM_POWERPC_PGTABLE_H
8478 #ifdef __KERNEL__
8479
8480+#include <linux/const.h>
8481 #ifndef __ASSEMBLY__
8482 #include <linux/mmdebug.h>
8483 #include <linux/mmzone.h>
8484diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8485index 62cfb0c..50c6402 100644
8486--- a/arch/powerpc/include/asm/pte-hash32.h
8487+++ b/arch/powerpc/include/asm/pte-hash32.h
8488@@ -20,6 +20,7 @@
8489 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
8490 #define _PAGE_USER 0x004 /* usermode access allowed */
8491 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8492+#define _PAGE_EXEC _PAGE_GUARDED
8493 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8494 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8495 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8496diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8497index af56b5c..f86f3f6 100644
8498--- a/arch/powerpc/include/asm/reg.h
8499+++ b/arch/powerpc/include/asm/reg.h
8500@@ -253,6 +253,7 @@
8501 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8502 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8503 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8504+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8505 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8506 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8507 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8508diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8509index d607df5..08dc9ae 100644
8510--- a/arch/powerpc/include/asm/smp.h
8511+++ b/arch/powerpc/include/asm/smp.h
8512@@ -51,7 +51,7 @@ struct smp_ops_t {
8513 int (*cpu_disable)(void);
8514 void (*cpu_die)(unsigned int nr);
8515 int (*cpu_bootable)(unsigned int nr);
8516-};
8517+} __no_const;
8518
8519 extern void smp_send_debugger_break(void);
8520 extern void start_secondary_resume(void);
8521diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8522index 4dbe072..b803275 100644
8523--- a/arch/powerpc/include/asm/spinlock.h
8524+++ b/arch/powerpc/include/asm/spinlock.h
8525@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8526 __asm__ __volatile__(
8527 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8528 __DO_SIGN_EXTEND
8529-" addic. %0,%0,1\n\
8530- ble- 2f\n"
8531+
8532+#ifdef CONFIG_PAX_REFCOUNT
8533+" mcrxr cr0\n"
8534+" addico. %0,%0,1\n"
8535+" bf 4*cr0+so, 3f\n"
8536+"2:.long " "0x00c00b00""\n"
8537+#else
8538+" addic. %0,%0,1\n"
8539+#endif
8540+
8541+"3:\n"
8542+ "ble- 4f\n"
8543 PPC405_ERR77(0,%1)
8544 " stwcx. %0,0,%1\n\
8545 bne- 1b\n"
8546 PPC_ACQUIRE_BARRIER
8547-"2:" : "=&r" (tmp)
8548+"4:"
8549+
8550+#ifdef CONFIG_PAX_REFCOUNT
8551+ _ASM_EXTABLE(2b,4b)
8552+#endif
8553+
8554+ : "=&r" (tmp)
8555 : "r" (&rw->lock)
8556 : "cr0", "xer", "memory");
8557
8558@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8559 __asm__ __volatile__(
8560 "# read_unlock\n\t"
8561 PPC_RELEASE_BARRIER
8562-"1: lwarx %0,0,%1\n\
8563- addic %0,%0,-1\n"
8564+"1: lwarx %0,0,%1\n"
8565+
8566+#ifdef CONFIG_PAX_REFCOUNT
8567+" mcrxr cr0\n"
8568+" addico. %0,%0,-1\n"
8569+" bf 4*cr0+so, 3f\n"
8570+"2:.long " "0x00c00b00""\n"
8571+#else
8572+" addic. %0,%0,-1\n"
8573+#endif
8574+
8575+"3:\n"
8576 PPC405_ERR77(0,%1)
8577 " stwcx. %0,0,%1\n\
8578 bne- 1b"
8579+
8580+#ifdef CONFIG_PAX_REFCOUNT
8581+"\n4:\n"
8582+ _ASM_EXTABLE(2b, 4b)
8583+#endif
8584+
8585 : "=&r"(tmp)
8586 : "r"(&rw->lock)
8587 : "cr0", "xer", "memory");
8588diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8589index 7248979..80b75de 100644
8590--- a/arch/powerpc/include/asm/thread_info.h
8591+++ b/arch/powerpc/include/asm/thread_info.h
8592@@ -103,6 +103,8 @@ static inline struct thread_info *current_thread_info(void)
8593 #if defined(CONFIG_PPC64)
8594 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8595 #endif
8596+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8597+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8598
8599 /* as above, but as bit values */
8600 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8601@@ -121,9 +123,10 @@ static inline struct thread_info *current_thread_info(void)
8602 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8603 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8604 #define _TIF_NOHZ (1<<TIF_NOHZ)
8605+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8606 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8607 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8608- _TIF_NOHZ)
8609+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8610
8611 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8612 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
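
Editor's note: the new comment is the whole constraint: the powerpc syscall entry tests _TIF_SYSCALL_DOTRACE with andi., whose immediate field is 16 bits, so TIF_GRSEC_SETXID must reuse a low bit (here bit 6) rather than claim the next free high one. A compile-time guard along these lines would enforce it (a sketch, not part of the patch; the mask value is illustrative):

    /* Fails to compile if the traced-syscall mask outgrows the 16-bit
     * immediate of the ppc 'andi.' instruction. */
    #define _TIF_SYSCALL_DOTRACE_EXAMPLE 0x0000e141UL  /* illustrative */

    typedef char dotrace_mask_fits_andi
            [(_TIF_SYSCALL_DOTRACE_EXAMPLE & ~0xffffUL) ? -1 : 1];
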
8613diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8614index a0c071d..49cdc7f 100644
8615--- a/arch/powerpc/include/asm/uaccess.h
8616+++ b/arch/powerpc/include/asm/uaccess.h
8617@@ -58,6 +58,7 @@
8618
8619 #endif
8620
8621+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8622 #define access_ok(type, addr, size) \
8623 (__chk_user_ptr(addr), \
8624 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8625@@ -318,52 +319,6 @@ do { \
8626 extern unsigned long __copy_tofrom_user(void __user *to,
8627 const void __user *from, unsigned long size);
8628
8629-#ifndef __powerpc64__
8630-
8631-static inline unsigned long copy_from_user(void *to,
8632- const void __user *from, unsigned long n)
8633-{
8634- unsigned long over;
8635-
8636- if (access_ok(VERIFY_READ, from, n))
8637- return __copy_tofrom_user((__force void __user *)to, from, n);
8638- if ((unsigned long)from < TASK_SIZE) {
8639- over = (unsigned long)from + n - TASK_SIZE;
8640- return __copy_tofrom_user((__force void __user *)to, from,
8641- n - over) + over;
8642- }
8643- return n;
8644-}
8645-
8646-static inline unsigned long copy_to_user(void __user *to,
8647- const void *from, unsigned long n)
8648-{
8649- unsigned long over;
8650-
8651- if (access_ok(VERIFY_WRITE, to, n))
8652- return __copy_tofrom_user(to, (__force void __user *)from, n);
8653- if ((unsigned long)to < TASK_SIZE) {
8654- over = (unsigned long)to + n - TASK_SIZE;
8655- return __copy_tofrom_user(to, (__force void __user *)from,
8656- n - over) + over;
8657- }
8658- return n;
8659-}
8660-
8661-#else /* __powerpc64__ */
8662-
8663-#define __copy_in_user(to, from, size) \
8664- __copy_tofrom_user((to), (from), (size))
8665-
8666-extern unsigned long copy_from_user(void *to, const void __user *from,
8667- unsigned long n);
8668-extern unsigned long copy_to_user(void __user *to, const void *from,
8669- unsigned long n);
8670-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8671- unsigned long n);
8672-
8673-#endif /* __powerpc64__ */
8674-
8675 static inline unsigned long __copy_from_user_inatomic(void *to,
8676 const void __user *from, unsigned long n)
8677 {
8678@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8679 if (ret == 0)
8680 return 0;
8681 }
8682+
8683+ if (!__builtin_constant_p(n))
8684+ check_object_size(to, n, false);
8685+
8686 return __copy_tofrom_user((__force void __user *)to, from, n);
8687 }
8688
8689@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8690 if (ret == 0)
8691 return 0;
8692 }
8693+
8694+ if (!__builtin_constant_p(n))
8695+ check_object_size(from, n, true);
8696+
8697 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8698 }
8699
8700@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8701 return __copy_to_user_inatomic(to, from, size);
8702 }
8703
8704+#ifndef __powerpc64__
8705+
8706+static inline unsigned long __must_check copy_from_user(void *to,
8707+ const void __user *from, unsigned long n)
8708+{
8709+ unsigned long over;
8710+
8711+ if ((long)n < 0)
8712+ return n;
8713+
8714+ if (access_ok(VERIFY_READ, from, n)) {
8715+ if (!__builtin_constant_p(n))
8716+ check_object_size(to, n, false);
8717+ return __copy_tofrom_user((__force void __user *)to, from, n);
8718+ }
8719+ if ((unsigned long)from < TASK_SIZE) {
8720+ over = (unsigned long)from + n - TASK_SIZE;
8721+ if (!__builtin_constant_p(n - over))
8722+ check_object_size(to, n - over, false);
8723+ return __copy_tofrom_user((__force void __user *)to, from,
8724+ n - over) + over;
8725+ }
8726+ return n;
8727+}
8728+
8729+static inline unsigned long __must_check copy_to_user(void __user *to,
8730+ const void *from, unsigned long n)
8731+{
8732+ unsigned long over;
8733+
8734+ if ((long)n < 0)
8735+ return n;
8736+
8737+ if (access_ok(VERIFY_WRITE, to, n)) {
8738+ if (!__builtin_constant_p(n))
8739+ check_object_size(from, n, true);
8740+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8741+ }
8742+ if ((unsigned long)to < TASK_SIZE) {
8743+ over = (unsigned long)to + n - TASK_SIZE;
8744+ if (!__builtin_constant_p(n - over))
8745+ check_object_size(from, n - over, true);
8746+ return __copy_tofrom_user(to, (__force void __user *)from,
8747+ n - over) + over;
8748+ }
8749+ return n;
8750+}
8751+
8752+#else /* __powerpc64__ */
8753+
8754+#define __copy_in_user(to, from, size) \
8755+ __copy_tofrom_user((to), (from), (size))
8756+
8757+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8758+{
8759+ if ((long)n < 0 || n > INT_MAX)
8760+ return n;
8761+
8762+ if (!__builtin_constant_p(n))
8763+ check_object_size(to, n, false);
8764+
8765+ if (likely(access_ok(VERIFY_READ, from, n)))
8766+ n = __copy_from_user(to, from, n);
8767+ else
8768+ memset(to, 0, n);
8769+ return n;
8770+}
8771+
8772+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8773+{
8774+ if ((long)n < 0 || n > INT_MAX)
8775+ return n;
8776+
8777+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8778+ if (!__builtin_constant_p(n))
8779+ check_object_size(from, n, true);
8780+ n = __copy_to_user(to, from, n);
8781+ }
8782+ return n;
8783+}
8784+
8785+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8786+ unsigned long n);
8787+
8788+#endif /* __powerpc64__ */
8789+
8790 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8791
8792 static inline unsigned long clear_user(void __user *addr, unsigned long size)
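
Editor's note: the rebuilt copy helpers layer three hardening steps over the stock ones: reject sizes whose sign bit is set (the usual symptom of unchecked size arithmetic in a caller), run check_object_size() on non-constant sizes so PAX_USERCOPY can bounds-check the kernel object, and, on the 64-bit read path, zero the destination when access_ok() fails so uninitialized kernel memory is never handed on. A self-contained model of that 64-bit read path, with stubs standing in for the kernel primitives:

    #include <limits.h>
    #include <stdbool.h>
    #include <string.h>

    /* Stubs standing in for access_ok(), check_object_size() and
     * __copy_from_user(); only the control flow is the point here. */
    static bool access_ok_stub(const void *p, unsigned long n)
    { (void)p; (void)n; return true; }
    static void check_object_size_stub(const void *p, unsigned long n,
                                       bool to_user)
    { (void)p; (void)n; (void)to_user; }
    static unsigned long raw_copy_stub(void *to, const void *from,
                                       unsigned long n)
    { memcpy(to, from, n); return 0; }   /* returns bytes NOT copied */

    unsigned long copy_from_user_model(void *to, const void *from,
                                       unsigned long n)
    {
        if ((long)n < 0 || n > INT_MAX)  /* sign/overflow bug in caller */
            return n;                    /* report "nothing copied"     */

        if (!__builtin_constant_p(n))
            check_object_size_stub(to, n, false);  /* PAX_USERCOPY      */

        if (access_ok_stub(from, n))
            n = raw_copy_stub(to, from, n);
        else
            memset(to, 0, n);            /* never leak stale kernel data */
        return n;
    }
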
8793diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8794index 502cf69..53936a1 100644
8795--- a/arch/powerpc/kernel/Makefile
8796+++ b/arch/powerpc/kernel/Makefile
8797@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8798 CFLAGS_btext.o += -fPIC
8799 endif
8800
8801+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8802+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8803+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8804+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8805+
8806 ifdef CONFIG_FUNCTION_TRACER
8807 # Do not trace early boot code
8808 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8809@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8810 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8811 endif
8812
8813+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8814+
8815 obj-y := cputable.o ptrace.o syscalls.o \
8816 irq.o align.o signal_32.o pmc.o vdso.o \
8817 process.o systbl.o idle.o \
8818diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8819index 3e68d1c..72a5ee6 100644
8820--- a/arch/powerpc/kernel/exceptions-64e.S
8821+++ b/arch/powerpc/kernel/exceptions-64e.S
8822@@ -1010,6 +1010,7 @@ storage_fault_common:
8823 std r14,_DAR(r1)
8824 std r15,_DSISR(r1)
8825 addi r3,r1,STACK_FRAME_OVERHEAD
8826+ bl save_nvgprs
8827 mr r4,r14
8828 mr r5,r15
8829 ld r14,PACA_EXGEN+EX_R14(r13)
8830@@ -1018,8 +1019,7 @@ storage_fault_common:
8831 cmpdi r3,0
8832 bne- 1f
8833 b ret_from_except_lite
8834-1: bl save_nvgprs
8835- mr r5,r3
8836+1: mr r5,r3
8837 addi r3,r1,STACK_FRAME_OVERHEAD
8838 ld r4,_DAR(r1)
8839 bl bad_page_fault
8840diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8841index 9519e6b..13f6c38 100644
8842--- a/arch/powerpc/kernel/exceptions-64s.S
8843+++ b/arch/powerpc/kernel/exceptions-64s.S
8844@@ -1599,10 +1599,10 @@ handle_page_fault:
8845 11: ld r4,_DAR(r1)
8846 ld r5,_DSISR(r1)
8847 addi r3,r1,STACK_FRAME_OVERHEAD
8848+ bl save_nvgprs
8849 bl do_page_fault
8850 cmpdi r3,0
8851 beq+ 12f
8852- bl save_nvgprs
8853 mr r5,r3
8854 addi r3,r1,STACK_FRAME_OVERHEAD
8855 lwz r4,_DAR(r1)
8856diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8857index 4509603..cdb491f 100644
8858--- a/arch/powerpc/kernel/irq.c
8859+++ b/arch/powerpc/kernel/irq.c
8860@@ -460,6 +460,8 @@ void migrate_irqs(void)
8861 }
8862 #endif
8863
8864+extern void gr_handle_kernel_exploit(void);
8865+
8866 static inline void check_stack_overflow(void)
8867 {
8868 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8869@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8870 pr_err("do_IRQ: stack overflow: %ld\n",
8871 sp - sizeof(struct thread_info));
8872 dump_stack();
8873+ gr_handle_kernel_exploit();
8874 }
8875 #endif
8876 }
8877diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8878index c94d2e0..992a9ce 100644
8879--- a/arch/powerpc/kernel/module_32.c
8880+++ b/arch/powerpc/kernel/module_32.c
8881@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8882 me->arch.core_plt_section = i;
8883 }
8884 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8885- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8886+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8887 return -ENOEXEC;
8888 }
8889
8890@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8891
8892 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8893 /* Init, or core PLT? */
8894- if (location >= mod->module_core
8895- && location < mod->module_core + mod->core_size)
8896+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8897+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8898 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8899- else
8900+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8901+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8902 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8903+ else {
8904+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8905+ return ~0UL;
8906+ }
8907
8908 /* Find this entry, or if that fails, the next avail. entry */
8909 while (entry->jump[0]) {
8910@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8911 }
8912 #ifdef CONFIG_DYNAMIC_FTRACE
8913 module->arch.tramp =
8914- do_plt_call(module->module_core,
8915+ do_plt_call(module->module_core_rx,
8916 (unsigned long)ftrace_caller,
8917 sechdrs, module);
8918 #endif
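
Editor's note: with the PaX module layout there is no single module_core span any more; code lives in an RX region and data in an RW region, for both the core and init images, so do_plt_call() has to test four ranges and, new in this hunk, reject a relocation target that falls in none of them instead of silently assuming the init PLT. The containment test generalizes to a small helper (a sketch; the patch open-codes the comparisons):

    #include <stdbool.h>

    /* One range test per region; do_plt_call() above performs this for
     * core_rx, core_rw, init_rx and init_rw in turn. */
    static bool in_region(const void *loc, const void *base,
                          unsigned long size)
    {
        return loc >= base &&
               (const char *)loc < (const char *)base + size;
    }

    /* Usage shape, mirroring the hunk:
     *   if (in_region(loc, core_rx, core_size_rx) ||
     *       in_region(loc, core_rw, core_size_rw))
     *           entry = core_plt;
     *   else if (in_region(loc, init_rx, init_size_rx) ||
     *            in_region(loc, init_rw, init_size_rw))
     *           entry = init_plt;
     *   else
     *           return ~0UL;   // invalid R_PPC_REL24 entry
     */
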
8919diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8920index b4cc7be..1fe8bb3 100644
8921--- a/arch/powerpc/kernel/process.c
8922+++ b/arch/powerpc/kernel/process.c
8923@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8924 * Lookup NIP late so we have the best chance of getting the
8925 * above info out without failing
8926 */
8927- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8928- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8929+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8930+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8931 #endif
8932 show_stack(current, (unsigned long *) regs->gpr[1]);
8933 if (!user_mode(regs))
8934@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8935 newsp = stack[0];
8936 ip = stack[STACK_FRAME_LR_SAVE];
8937 if (!firstframe || ip != lr) {
8938- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8939+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8940 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8941 if ((ip == rth) && curr_frame >= 0) {
8942- printk(" (%pS)",
8943+ printk(" (%pA)",
8944 (void *)current->ret_stack[curr_frame].ret);
8945 curr_frame--;
8946 }
8947@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8948 struct pt_regs *regs = (struct pt_regs *)
8949 (sp + STACK_FRAME_OVERHEAD);
8950 lr = regs->link;
8951- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8952+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8953 regs->trap, (void *)regs->nip, (void *)lr);
8954 firstframe = 1;
8955 }
8956@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8957 mtspr(SPRN_CTRLT, ctrl);
8958 }
8959 #endif /* CONFIG_PPC64 */
8960-
8961-unsigned long arch_align_stack(unsigned long sp)
8962-{
8963- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8964- sp -= get_random_int() & ~PAGE_MASK;
8965- return sp & ~0xf;
8966-}
8967-
8968-static inline unsigned long brk_rnd(void)
8969-{
8970- unsigned long rnd = 0;
8971-
8972- /* 8MB for 32bit, 1GB for 64bit */
8973- if (is_32bit_task())
8974- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8975- else
8976- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8977-
8978- return rnd << PAGE_SHIFT;
8979-}
8980-
8981-unsigned long arch_randomize_brk(struct mm_struct *mm)
8982-{
8983- unsigned long base = mm->brk;
8984- unsigned long ret;
8985-
8986-#ifdef CONFIG_PPC_STD_MMU_64
8987- /*
8988- * If we are using 1TB segments and we are allowed to randomise
8989- * the heap, we can put it above 1TB so it is backed by a 1TB
8990- * segment. Otherwise the heap will be in the bottom 1TB
8991- * which always uses 256MB segments and this may result in a
8992- * performance penalty.
8993- */
8994- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8995- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8996-#endif
8997-
8998- ret = PAGE_ALIGN(base + brk_rnd());
8999-
9000- if (ret < mm->brk)
9001- return mm->brk;
9002-
9003- return ret;
9004-}
9005-
9006diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9007index f21897b..28c0428 100644
9008--- a/arch/powerpc/kernel/ptrace.c
9009+++ b/arch/powerpc/kernel/ptrace.c
9010@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9011 return ret;
9012 }
9013
9014+#ifdef CONFIG_GRKERNSEC_SETXID
9015+extern void gr_delayed_cred_worker(void);
9016+#endif
9017+
9018 /*
9019 * We must return the syscall number to actually look up in the table.
9020 * This can be -1L to skip running any syscall at all.
9021@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9022
9023 secure_computing_strict(regs->gpr[0]);
9024
9025+#ifdef CONFIG_GRKERNSEC_SETXID
9026+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9027+ gr_delayed_cred_worker();
9028+#endif
9029+
9030 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9031 tracehook_report_syscall_entry(regs))
9032 /*
9033@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9034 {
9035 int step;
9036
9037+#ifdef CONFIG_GRKERNSEC_SETXID
9038+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9039+ gr_delayed_cred_worker();
9040+#endif
9041+
9042 audit_syscall_exit(regs);
9043
9044 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
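
Editor's note: the GRKERNSEC_SETXID hook is a deferred-work flag: when another thread changes the process credentials, each thread gets TIF_GRSEC_SETXID set, and the next syscall entry or exit runs gr_delayed_cred_worker() exactly once per request because the flag is consumed with test_and_clear. The idiom in isolation, as a user-space sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool cred_update_pending;

    /* Set from elsewhere when new credentials must be applied. */
    void request_cred_update(void)
    {
        atomic_store(&cred_update_pending, true);
    }

    /* Called at every syscall boundary; atomic_exchange() plays the role
     * of test_and_clear_thread_flag(), so the worker runs at most once
     * per request even if entry and exit both observe the flag. */
    void syscall_boundary_hook(void)
    {
        if (atomic_exchange(&cred_update_pending, false)) {
            /* gr_delayed_cred_worker() equivalent would run here */
        }
    }
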
9045diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9046index d3a831a..3a33123 100644
9047--- a/arch/powerpc/kernel/signal_32.c
9048+++ b/arch/powerpc/kernel/signal_32.c
9049@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9050 /* Save user registers on the stack */
9051 frame = &rt_sf->uc.uc_mcontext;
9052 addr = frame;
9053- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9054+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9055 sigret = 0;
9056 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9057 } else {
9058diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9059index c7c24d2..1bf7039 100644
9060--- a/arch/powerpc/kernel/signal_64.c
9061+++ b/arch/powerpc/kernel/signal_64.c
9062@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9063 current->thread.fp_state.fpscr = 0;
9064
9065 /* Set up to return from userspace. */
9066- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9067+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9068 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9069 } else {
9070 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9071diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9072index 19e4744..28a8d7b 100644
9073--- a/arch/powerpc/kernel/traps.c
9074+++ b/arch/powerpc/kernel/traps.c
9075@@ -36,6 +36,7 @@
9076 #include <linux/debugfs.h>
9077 #include <linux/ratelimit.h>
9078 #include <linux/context_tracking.h>
9079+#include <linux/uaccess.h>
9080
9081 #include <asm/emulated_ops.h>
9082 #include <asm/pgtable.h>
9083@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9084 return flags;
9085 }
9086
9087+extern void gr_handle_kernel_exploit(void);
9088+
9089 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9090 int signr)
9091 {
9092@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9093 panic("Fatal exception in interrupt");
9094 if (panic_on_oops)
9095 panic("Fatal exception");
9096+
9097+ gr_handle_kernel_exploit();
9098+
9099 do_exit(signr);
9100 }
9101
9102@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9103 enum ctx_state prev_state = exception_enter();
9104 unsigned int reason = get_reason(regs);
9105
9106+#ifdef CONFIG_PAX_REFCOUNT
9107+ unsigned int bkpt;
9108+ const struct exception_table_entry *entry;
9109+
9110+ if (reason & REASON_ILLEGAL) {
9111+ /* Check if PaX bad instruction */
9112+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9113+ current->thread.trap_nr = 0;
9114+ pax_report_refcount_overflow(regs);
9115+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9116+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9117+ regs->nip = entry->fixup;
9118+ return;
9119+ }
9120+ /* fixup_exception() could not handle */
9121+ goto bail;
9122+ }
9123+ }
9124+#endif
9125+
9126 /* We can now get here via a FP Unavailable exception if the core
9127 * has no FPU, in that case the reason flags will be 0 */
9128
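
Editor's note: this is the consumer of the 0x00c00b00 words planted by the REFCOUNT asm elsewhere in the patch. program_check_exception() fetches the faulting instruction with probe_kernel_address(), and on a match reports the overflow, then emulates the fixup_exception() that powerpc lacks by finding the extable entry for regs->nip and jumping to its recovery label. A self-contained model of that lookup (the entry layout follows the era's { insn, fixup } pair; the addresses are invented):

    struct extable_entry { unsigned long insn, fixup; };

    /* One entry per trap site: "2:" (the trap word) maps to "4:". */
    static const struct extable_entry extable[] = {
        { 0x1000UL, 0x1010UL },   /* invented addresses, for shape only */
    };

    static int fixup_refcount_trap(unsigned long *nip)
    {
        unsigned long i;

        for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++) {
            if (extable[i].insn == *nip) {
                *nip = extable[i].fixup; /* skip the store, resume at 4: */
                return 1;
            }
        }
        return 0;                    /* unhandled: fall through to oops */
    }
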
9129diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9130index 305eb0d..accc5b40 100644
9131--- a/arch/powerpc/kernel/vdso.c
9132+++ b/arch/powerpc/kernel/vdso.c
9133@@ -34,6 +34,7 @@
9134 #include <asm/vdso.h>
9135 #include <asm/vdso_datapage.h>
9136 #include <asm/setup.h>
9137+#include <asm/mman.h>
9138
9139 #undef DEBUG
9140
9141@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9142 vdso_base = VDSO32_MBASE;
9143 #endif
9144
9145- current->mm->context.vdso_base = 0;
9146+ current->mm->context.vdso_base = ~0UL;
9147
9148 /* vDSO has a problem and was disabled, just don't "enable" it for the
9149 * process
9150@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9151 vdso_base = get_unmapped_area(NULL, vdso_base,
9152 (vdso_pages << PAGE_SHIFT) +
9153 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9154- 0, 0);
9155+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9156 if (IS_ERR_VALUE(vdso_base)) {
9157 rc = vdso_base;
9158 goto fail_mmapsem;
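
Editor's note: the "no vDSO" sentinel moves from 0 to ~0UL, with the two signal paths earlier in the patch updated to match; presumably the point is that ~0UL can never be a valid page-aligned mapping base, whereas 0 at least is representable once bases are randomized. The mapping itself is now also requested as MAP_PRIVATE | MAP_EXECUTABLE. The sentinel check in isolation (a sketch mirroring handle_rt_signal32()):

    #define VDSO_DISABLED (~0UL)

    /* Pick the signal trampoline the way handle_rt_signal32() now does:
     * use the vDSO entry only when one was actually mapped. */
    static unsigned long pick_sigtramp(unsigned long vdso_base,
                                       unsigned long sigtramp_offset,
                                       unsigned long stack_tramp)
    {
        if (sigtramp_offset && vdso_base != VDSO_DISABLED)
            return vdso_base + sigtramp_offset;  /* jump via the vDSO   */
        return stack_tramp;                      /* on-stack trampoline */
    }
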
9159diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9160index 27c0fac..6ec4a32 100644
9161--- a/arch/powerpc/kvm/powerpc.c
9162+++ b/arch/powerpc/kvm/powerpc.c
9163@@ -1402,7 +1402,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9164 }
9165 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9166
9167-int kvm_arch_init(void *opaque)
9168+int kvm_arch_init(const void *opaque)
9169 {
9170 return 0;
9171 }
9172diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9173index 5eea6f3..5d10396 100644
9174--- a/arch/powerpc/lib/usercopy_64.c
9175+++ b/arch/powerpc/lib/usercopy_64.c
9176@@ -9,22 +9,6 @@
9177 #include <linux/module.h>
9178 #include <asm/uaccess.h>
9179
9180-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9181-{
9182- if (likely(access_ok(VERIFY_READ, from, n)))
9183- n = __copy_from_user(to, from, n);
9184- else
9185- memset(to, 0, n);
9186- return n;
9187-}
9188-
9189-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9190-{
9191- if (likely(access_ok(VERIFY_WRITE, to, n)))
9192- n = __copy_to_user(to, from, n);
9193- return n;
9194-}
9195-
9196 unsigned long copy_in_user(void __user *to, const void __user *from,
9197 unsigned long n)
9198 {
9199@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9200 return n;
9201 }
9202
9203-EXPORT_SYMBOL(copy_from_user);
9204-EXPORT_SYMBOL(copy_to_user);
9205 EXPORT_SYMBOL(copy_in_user);
9206
9207diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9208index b396868..3eb6b9f 100644
9209--- a/arch/powerpc/mm/fault.c
9210+++ b/arch/powerpc/mm/fault.c
9211@@ -33,6 +33,10 @@
9212 #include <linux/ratelimit.h>
9213 #include <linux/context_tracking.h>
9214 #include <linux/hugetlb.h>
9215+#include <linux/slab.h>
9216+#include <linux/pagemap.h>
9217+#include <linux/compiler.h>
9218+#include <linux/unistd.h>
9219
9220 #include <asm/firmware.h>
9221 #include <asm/page.h>
9222@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9223 }
9224 #endif
9225
9226+#ifdef CONFIG_PAX_PAGEEXEC
9227+/*
9228+ * PaX: decide what to do with offenders (regs->nip = fault address)
9229+ *
9230+ * returns 1 when task should be killed
9231+ */
9232+static int pax_handle_fetch_fault(struct pt_regs *regs)
9233+{
9234+ return 1;
9235+}
9236+
9237+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9238+{
9239+ unsigned long i;
9240+
9241+ printk(KERN_ERR "PAX: bytes at PC: ");
9242+ for (i = 0; i < 5; i++) {
9243+ unsigned int c;
9244+ if (get_user(c, (unsigned int __user *)pc+i))
9245+ printk(KERN_CONT "???????? ");
9246+ else
9247+ printk(KERN_CONT "%08x ", c);
9248+ }
9249+ printk("\n");
9250+}
9251+#endif
9252+
9253 /*
9254 * Check whether the instruction at regs->nip is a store using
9255 * an update addressing form which will update r1.
9256@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9257 * indicate errors in DSISR but can validly be set in SRR1.
9258 */
9259 if (trap == 0x400)
9260- error_code &= 0x48200000;
9261+ error_code &= 0x58200000;
9262 else
9263 is_write = error_code & DSISR_ISSTORE;
9264 #else
9265@@ -383,12 +414,16 @@ good_area:
9266 * "undefined". Of those that can be set, this is the only
9267 * one which seems bad.
9268 */
9269- if (error_code & 0x10000000)
9270+ if (error_code & DSISR_GUARDED)
9271 /* Guarded storage error. */
9272 goto bad_area;
9273 #endif /* CONFIG_8xx */
9274
9275 if (is_exec) {
9276+#ifdef CONFIG_PPC_STD_MMU
9277+ if (error_code & DSISR_GUARDED)
9278+ goto bad_area;
9279+#endif
9280 /*
9281 * Allow execution from readable areas if the MMU does not
9282 * provide separate controls over reading and executing.
9283@@ -483,6 +518,23 @@ bad_area:
9284 bad_area_nosemaphore:
9285 /* User mode accesses cause a SIGSEGV */
9286 if (user_mode(regs)) {
9287+
9288+#ifdef CONFIG_PAX_PAGEEXEC
9289+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9290+#ifdef CONFIG_PPC_STD_MMU
9291+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9292+#else
9293+ if (is_exec && regs->nip == address) {
9294+#endif
9295+ switch (pax_handle_fetch_fault(regs)) {
9296+ }
9297+
9298+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9299+ do_group_exit(SIGKILL);
9300+ }
9301+ }
9302+#endif
9303+
9304 _exception(SIGSEGV, regs, code, address);
9305 goto bail;
9306 }
9307diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9308index cb8bdbe..cde4bc7 100644
9309--- a/arch/powerpc/mm/mmap.c
9310+++ b/arch/powerpc/mm/mmap.c
9311@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9312 return sysctl_legacy_va_layout;
9313 }
9314
9315-static unsigned long mmap_rnd(void)
9316+static unsigned long mmap_rnd(struct mm_struct *mm)
9317 {
9318 unsigned long rnd = 0;
9319
9320+#ifdef CONFIG_PAX_RANDMMAP
9321+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9322+#endif
9323+
9324 if (current->flags & PF_RANDOMIZE) {
9325 /* 8MB for 32bit, 1GB for 64bit */
9326 if (is_32bit_task())
9327@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9328 return rnd << PAGE_SHIFT;
9329 }
9330
9331-static inline unsigned long mmap_base(void)
9332+static inline unsigned long mmap_base(struct mm_struct *mm)
9333 {
9334 unsigned long gap = rlimit(RLIMIT_STACK);
9335
9336@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9337 else if (gap > MAX_GAP)
9338 gap = MAX_GAP;
9339
9340- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9341+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9342 }
9343
9344 /*
9345@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9346 */
9347 if (mmap_is_legacy()) {
9348 mm->mmap_base = TASK_UNMAPPED_BASE;
9349+
9350+#ifdef CONFIG_PAX_RANDMMAP
9351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9352+ mm->mmap_base += mm->delta_mmap;
9353+#endif
9354+
9355 mm->get_unmapped_area = arch_get_unmapped_area;
9356 } else {
9357- mm->mmap_base = mmap_base();
9358+ mm->mmap_base = mmap_base(mm);
9359+
9360+#ifdef CONFIG_PAX_RANDMMAP
9361+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9362+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9363+#endif
9364+
9365 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9366 }
9367 }
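
Editor's note: under RANDMMAP, PaX takes over base randomization itself: the guard added to mmap_rnd() suppresses the stock PF_RANDOMIZE jitter, and arch_pick_mmap_layout() instead offsets the base by the per-process delta_mmap / delta_stack values chosen at exec time. One expression per layout captures the result (a sketch of the post-patch logic):

    /* Sketch: final mmap base selection after this hunk. */
    static unsigned long pick_mmap_base(int legacy, int randmmap,
                                        unsigned long task_unmapped_base,
                                        unsigned long topdown_base,
                                        unsigned long delta_mmap,
                                        unsigned long delta_stack)
    {
        if (legacy)   /* bottom-up: push the start up by delta_mmap */
            return task_unmapped_base + (randmmap ? delta_mmap : 0);

        /* top-down: pull the base down by both deltas, so the region
         * under the stack absorbs delta_stack as well */
        return topdown_base - (randmmap ? delta_mmap + delta_stack : 0);
    }
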
9368diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9369index 0f432a7..abfe841 100644
9370--- a/arch/powerpc/mm/slice.c
9371+++ b/arch/powerpc/mm/slice.c
9372@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9373 if ((mm->task_size - len) < addr)
9374 return 0;
9375 vma = find_vma(mm, addr);
9376- return (!vma || (addr + len) <= vma->vm_start);
9377+ return check_heap_stack_gap(vma, addr, len, 0);
9378 }
9379
9380 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9381@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9382 info.align_offset = 0;
9383
9384 addr = TASK_UNMAPPED_BASE;
9385+
9386+#ifdef CONFIG_PAX_RANDMMAP
9387+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9388+ addr += mm->delta_mmap;
9389+#endif
9390+
9391 while (addr < TASK_SIZE) {
9392 info.low_limit = addr;
9393 if (!slice_scan_available(addr, available, 1, &addr))
9394@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9395 if (fixed && addr > (mm->task_size - len))
9396 return -ENOMEM;
9397
9398+#ifdef CONFIG_PAX_RANDMMAP
9399+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9400+ addr = 0;
9401+#endif
9402+
9403 /* If hint, make sure it matches our alignment restrictions */
9404 if (!fixed && addr) {
9405 addr = _ALIGN_UP(addr, 1ul << pshift);
9406diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9407index d966bbe..372124a 100644
9408--- a/arch/powerpc/platforms/cell/spufs/file.c
9409+++ b/arch/powerpc/platforms/cell/spufs/file.c
9410@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9411 return VM_FAULT_NOPAGE;
9412 }
9413
9414-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9415+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9416 unsigned long address,
9417- void *buf, int len, int write)
9418+ void *buf, size_t len, int write)
9419 {
9420 struct spu_context *ctx = vma->vm_file->private_data;
9421 unsigned long offset = address - vma->vm_start;
9422diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9423index fa934fe..c296056 100644
9424--- a/arch/s390/include/asm/atomic.h
9425+++ b/arch/s390/include/asm/atomic.h
9426@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9427 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9428 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9429
9430+#define atomic64_read_unchecked(v) atomic64_read(v)
9431+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9432+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9433+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9434+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9435+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9436+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9437+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9438+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9439+
9440 #endif /* __ARCH_S390_ATOMIC__ */
9441diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9442index 8d72471..5322500 100644
9443--- a/arch/s390/include/asm/barrier.h
9444+++ b/arch/s390/include/asm/barrier.h
9445@@ -42,7 +42,7 @@
9446 do { \
9447 compiletime_assert_atomic_type(*p); \
9448 barrier(); \
9449- ACCESS_ONCE(*p) = (v); \
9450+ ACCESS_ONCE_RW(*p) = (v); \
9451 } while (0)
9452
9453 #define smp_load_acquire(p) \
9454diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9455index 4d7ccac..d03d0ad 100644
9456--- a/arch/s390/include/asm/cache.h
9457+++ b/arch/s390/include/asm/cache.h
9458@@ -9,8 +9,10 @@
9459 #ifndef __ARCH_S390_CACHE_H
9460 #define __ARCH_S390_CACHE_H
9461
9462-#define L1_CACHE_BYTES 256
9463+#include <linux/const.h>
9464+
9465 #define L1_CACHE_SHIFT 8
9466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9467 #define NET_SKB_PAD 32
9468
9469 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9470diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9471index c9c875d..b4b0e4c 100644
9472--- a/arch/s390/include/asm/elf.h
9473+++ b/arch/s390/include/asm/elf.h
9474@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9475 the loader. We need to make sure that it is out of the way of the program
9476 that it will "exec", and that there is sufficient room for the brk. */
9477
9478-extern unsigned long randomize_et_dyn(void);
9479-#define ELF_ET_DYN_BASE randomize_et_dyn()
9480+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9481+
9482+#ifdef CONFIG_PAX_ASLR
9483+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9484+
9485+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9486+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9487+#endif
9488
9489 /* This yields a mask that user programs can use to figure out what
9490 instruction set this CPU supports. */
9491@@ -225,9 +231,6 @@ struct linux_binprm;
9492 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9493 int arch_setup_additional_pages(struct linux_binprm *, int);
9494
9495-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9496-#define arch_randomize_brk arch_randomize_brk
9497-
9498 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9499
9500 #endif
9501diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9502index c4a93d6..4d2a9b4 100644
9503--- a/arch/s390/include/asm/exec.h
9504+++ b/arch/s390/include/asm/exec.h
9505@@ -7,6 +7,6 @@
9506 #ifndef __ASM_EXEC_H
9507 #define __ASM_EXEC_H
9508
9509-extern unsigned long arch_align_stack(unsigned long sp);
9510+#define arch_align_stack(x) ((x) & ~0xfUL)
9511
9512 #endif /* __ASM_EXEC_H */
9513diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9514index cd4c68e..6764641 100644
9515--- a/arch/s390/include/asm/uaccess.h
9516+++ b/arch/s390/include/asm/uaccess.h
9517@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9518 __range_ok((unsigned long)(addr), (size)); \
9519 })
9520
9521+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9522 #define access_ok(type, addr, size) __access_ok(addr, size)
9523
9524 /*
9525@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9526 copy_to_user(void __user *to, const void *from, unsigned long n)
9527 {
9528 might_fault();
9529+
9530+ if ((long)n < 0)
9531+ return n;
9532+
9533 return __copy_to_user(to, from, n);
9534 }
9535
9536@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9537 static inline unsigned long __must_check
9538 copy_from_user(void *to, const void __user *from, unsigned long n)
9539 {
9540- unsigned int sz = __compiletime_object_size(to);
9541+ size_t sz = __compiletime_object_size(to);
9542
9543 might_fault();
9544- if (unlikely(sz != -1 && sz < n)) {
9545+
9546+ if ((long)n < 0)
9547+ return n;
9548+
9549+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9550 copy_from_user_overflow();
9551 return n;
9552 }
9553diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9554index 2ca9586..55682a9 100644
9555--- a/arch/s390/kernel/module.c
9556+++ b/arch/s390/kernel/module.c
9557@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9558
9559 /* Increase core size by size of got & plt and set start
9560 offsets for got and plt. */
9561- me->core_size = ALIGN(me->core_size, 4);
9562- me->arch.got_offset = me->core_size;
9563- me->core_size += me->arch.got_size;
9564- me->arch.plt_offset = me->core_size;
9565- me->core_size += me->arch.plt_size;
9566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9567+ me->arch.got_offset = me->core_size_rw;
9568+ me->core_size_rw += me->arch.got_size;
9569+ me->arch.plt_offset = me->core_size_rx;
9570+ me->core_size_rx += me->arch.plt_size;
9571 return 0;
9572 }
9573
9574@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9575 if (info->got_initialized == 0) {
9576 Elf_Addr *gotent;
9577
9578- gotent = me->module_core + me->arch.got_offset +
9579+ gotent = me->module_core_rw + me->arch.got_offset +
9580 info->got_offset;
9581 *gotent = val;
9582 info->got_initialized = 1;
9583@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9584 rc = apply_rela_bits(loc, val, 0, 64, 0);
9585 else if (r_type == R_390_GOTENT ||
9586 r_type == R_390_GOTPLTENT) {
9587- val += (Elf_Addr) me->module_core - loc;
9588+ val += (Elf_Addr) me->module_core_rw - loc;
9589 rc = apply_rela_bits(loc, val, 1, 32, 1);
9590 }
9591 break;
9592@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9594 if (info->plt_initialized == 0) {
9595 unsigned int *ip;
9596- ip = me->module_core + me->arch.plt_offset +
9597+ ip = me->module_core_rx + me->arch.plt_offset +
9598 info->plt_offset;
9599 #ifndef CONFIG_64BIT
9600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9601@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9602 val - loc + 0xffffUL < 0x1ffffeUL) ||
9603 (r_type == R_390_PLT32DBL &&
9604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9605- val = (Elf_Addr) me->module_core +
9606+ val = (Elf_Addr) me->module_core_rx +
9607 me->arch.plt_offset +
9608 info->plt_offset;
9609 val += rela->r_addend - loc;
9610@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9613 val = val + rela->r_addend -
9614- ((Elf_Addr) me->module_core + me->arch.got_offset);
9615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9616 if (r_type == R_390_GOTOFF16)
9617 rc = apply_rela_bits(loc, val, 0, 16, 0);
9618 else if (r_type == R_390_GOTOFF32)
9619@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9620 break;
9621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9625 rela->r_addend - loc;
9626 if (r_type == R_390_GOTPC)
9627 rc = apply_rela_bits(loc, val, 1, 32, 0);
9628diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9629index 13fc097..84d375f 100644
9630--- a/arch/s390/kernel/process.c
9631+++ b/arch/s390/kernel/process.c
9632@@ -227,27 +227,3 @@ unsigned long get_wchan(struct task_struct *p)
9633 }
9634 return 0;
9635 }
9636-
9637-unsigned long arch_align_stack(unsigned long sp)
9638-{
9639- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9640- sp -= get_random_int() & ~PAGE_MASK;
9641- return sp & ~0xf;
9642-}
9643-
9644-static inline unsigned long brk_rnd(void)
9645-{
9646- /* 8MB for 32bit, 1GB for 64bit */
9647- if (is_32bit_task())
9648- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9649- else
9650- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9651-}
9652-
9653-unsigned long arch_randomize_brk(struct mm_struct *mm)
9654-{
9655- unsigned long ret;
9656-
9657- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9658- return (ret > mm->brk) ? ret : mm->brk;
9659-}
9660diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9661index 179a2c2..371e85c 100644
9662--- a/arch/s390/mm/mmap.c
9663+++ b/arch/s390/mm/mmap.c
9664@@ -204,9 +204,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9665 */
9666 if (mmap_is_legacy()) {
9667 mm->mmap_base = mmap_base_legacy();
9668+
9669+#ifdef CONFIG_PAX_RANDMMAP
9670+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9671+ mm->mmap_base += mm->delta_mmap;
9672+#endif
9673+
9674 mm->get_unmapped_area = arch_get_unmapped_area;
9675 } else {
9676 mm->mmap_base = mmap_base();
9677+
9678+#ifdef CONFIG_PAX_RANDMMAP
9679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9681+#endif
9682+
9683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9684 }
9685 }
9686@@ -279,9 +291,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9687 */
9688 if (mmap_is_legacy()) {
9689 mm->mmap_base = mmap_base_legacy();
9690+
9691+#ifdef CONFIG_PAX_RANDMMAP
9692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9693+ mm->mmap_base += mm->delta_mmap;
9694+#endif
9695+
9696 mm->get_unmapped_area = s390_get_unmapped_area;
9697 } else {
9698 mm->mmap_base = mmap_base();
9699+
9700+#ifdef CONFIG_PAX_RANDMMAP
9701+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9702+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9703+#endif
9704+
9705 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9706 }
9707 }
9708diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9709index ae3d59f..f65f075 100644
9710--- a/arch/score/include/asm/cache.h
9711+++ b/arch/score/include/asm/cache.h
9712@@ -1,7 +1,9 @@
9713 #ifndef _ASM_SCORE_CACHE_H
9714 #define _ASM_SCORE_CACHE_H
9715
9716+#include <linux/const.h>
9717+
9718 #define L1_CACHE_SHIFT 4
9719-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9721
9722 #endif /* _ASM_SCORE_CACHE_H */
9723diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9724index f9f3cd5..58ff438 100644
9725--- a/arch/score/include/asm/exec.h
9726+++ b/arch/score/include/asm/exec.h
9727@@ -1,6 +1,6 @@
9728 #ifndef _ASM_SCORE_EXEC_H
9729 #define _ASM_SCORE_EXEC_H
9730
9731-extern unsigned long arch_align_stack(unsigned long sp);
9732+#define arch_align_stack(x) (x)
9733
9734 #endif /* _ASM_SCORE_EXEC_H */
9735diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9736index a1519ad3..e8ac1ff 100644
9737--- a/arch/score/kernel/process.c
9738+++ b/arch/score/kernel/process.c
9739@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9740
9741 return task_pt_regs(task)->cp0_epc;
9742 }
9743-
9744-unsigned long arch_align_stack(unsigned long sp)
9745-{
9746- return sp;
9747-}
9748diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9749index ef9e555..331bd29 100644
9750--- a/arch/sh/include/asm/cache.h
9751+++ b/arch/sh/include/asm/cache.h
9752@@ -9,10 +9,11 @@
9753 #define __ASM_SH_CACHE_H
9754 #ifdef __KERNEL__
9755
9756+#include <linux/const.h>
9757 #include <linux/init.h>
9758 #include <cpu/cache.h>
9759
9760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9761+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9762
9763 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9764
9765diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9766index 6777177..cb5e44f 100644
9767--- a/arch/sh/mm/mmap.c
9768+++ b/arch/sh/mm/mmap.c
9769@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9770 struct mm_struct *mm = current->mm;
9771 struct vm_area_struct *vma;
9772 int do_colour_align;
9773+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9774 struct vm_unmapped_area_info info;
9775
9776 if (flags & MAP_FIXED) {
9777@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9778 if (filp || (flags & MAP_SHARED))
9779 do_colour_align = 1;
9780
9781+#ifdef CONFIG_PAX_RANDMMAP
9782+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9783+#endif
9784+
9785 if (addr) {
9786 if (do_colour_align)
9787 addr = COLOUR_ALIGN(addr, pgoff);
9788@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9789 addr = PAGE_ALIGN(addr);
9790
9791 vma = find_vma(mm, addr);
9792- if (TASK_SIZE - len >= addr &&
9793- (!vma || addr + len <= vma->vm_start))
9794+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9795 return addr;
9796 }
9797
9798 info.flags = 0;
9799 info.length = len;
9800- info.low_limit = TASK_UNMAPPED_BASE;
9801+ info.low_limit = mm->mmap_base;
9802 info.high_limit = TASK_SIZE;
9803 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9804 info.align_offset = pgoff << PAGE_SHIFT;
9805@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9806 struct mm_struct *mm = current->mm;
9807 unsigned long addr = addr0;
9808 int do_colour_align;
9809+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9810 struct vm_unmapped_area_info info;
9811
9812 if (flags & MAP_FIXED) {
9813@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9814 if (filp || (flags & MAP_SHARED))
9815 do_colour_align = 1;
9816
9817+#ifdef CONFIG_PAX_RANDMMAP
9818+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9819+#endif
9820+
9821 /* requesting a specific address */
9822 if (addr) {
9823 if (do_colour_align)
9824@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9825 addr = PAGE_ALIGN(addr);
9826
9827 vma = find_vma(mm, addr);
9828- if (TASK_SIZE - len >= addr &&
9829- (!vma || addr + len <= vma->vm_start))
9830+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9831 return addr;
9832 }
9833
9834@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9835 VM_BUG_ON(addr != -ENOMEM);
9836 info.flags = 0;
9837 info.low_limit = TASK_UNMAPPED_BASE;
9838+
9839+#ifdef CONFIG_PAX_RANDMMAP
9840+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9841+ info.low_limit += mm->delta_mmap;
9842+#endif
9843+
9844 info.high_limit = TASK_SIZE;
9845 addr = vm_unmapped_area(&info);
9846 }
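
Editor's note: each open-coded "!vma || addr + len <= vma->vm_start" test becomes check_heap_stack_gap(), which also keeps a guard gap in front of stack VMAs and accounts for the per-thread offset produced by gr_rand_threadstack_offset(). A rough, self-contained approximation of its semantics; the real helper lives in the grsecurity core and handles more cases (VM_GROWSUP, the heap_stack_gap sysctl):

    #include <stdbool.h>

    struct vma_like { unsigned long vm_start, vm_flags; };
    #define VM_GROWSDOWN 0x0100UL

    /* Approximation: reject a candidate [addr, addr+len) that would sit
     * closer than gap_bytes (plus the thread-stack offset) to a
     * growsdown stack VMA; plain adjacency suffices otherwise. */
    static bool check_heap_stack_gap_sketch(const struct vma_like *vma,
                                            unsigned long addr,
                                            unsigned long len,
                                            unsigned long offset,
                                            unsigned long gap_bytes)
    {
        if (!vma)
            return true;                        /* nothing above: fits */
        if (vma->vm_flags & VM_GROWSDOWN)
            return addr + len + gap_bytes + offset <= vma->vm_start;
        return addr + len <= vma->vm_start;
    }
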
9847diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9848index 4082749..fd97781 100644
9849--- a/arch/sparc/include/asm/atomic_64.h
9850+++ b/arch/sparc/include/asm/atomic_64.h
9851@@ -15,18 +15,38 @@
9852 #define ATOMIC64_INIT(i) { (i) }
9853
9854 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9855+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9856+{
9857+ return ACCESS_ONCE(v->counter);
9858+}
9859 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9860+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9861+{
9862+ return ACCESS_ONCE(v->counter);
9863+}
9864
9865 #define atomic_set(v, i) (((v)->counter) = i)
9866+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9867+{
9868+ v->counter = i;
9869+}
9870 #define atomic64_set(v, i) (((v)->counter) = i)
9871+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9872+{
9873+ v->counter = i;
9874+}
9875
9876-#define ATOMIC_OP(op) \
9877-void atomic_##op(int, atomic_t *); \
9878-void atomic64_##op(long, atomic64_t *);
9879+#define __ATOMIC_OP(op, suffix) \
9880+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9881+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9882
9883-#define ATOMIC_OP_RETURN(op) \
9884-int atomic_##op##_return(int, atomic_t *); \
9885-long atomic64_##op##_return(long, atomic64_t *);
9886+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9887+
9888+#define __ATOMIC_OP_RETURN(op, suffix) \
9889+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9890+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9891+
9892+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9893
9894 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9895
9896@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9897
9898 #undef ATOMIC_OPS
9899 #undef ATOMIC_OP_RETURN
9900+#undef __ATOMIC_OP_RETURN
9901 #undef ATOMIC_OP
9902+#undef __ATOMIC_OP
9903
9904 #define atomic_dec_return(v) atomic_sub_return(1, v)
9905 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9906
9907 #define atomic_inc_return(v) atomic_add_return(1, v)
9908+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9909+{
9910+ return atomic_add_return_unchecked(1, v);
9911+}
9912 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9913+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9914+{
9915+ return atomic64_add_return_unchecked(1, v);
9916+}
9917
9918 /*
9919 * atomic_inc_and_test - increment and test
9920@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9921 * other cases.
9922 */
9923 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9924+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9925+{
9926+ return atomic_inc_return_unchecked(v) == 0;
9927+}
9928 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9929
9930 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9931@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9932 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9933
9934 #define atomic_inc(v) atomic_add(1, v)
9935+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9936+{
9937+ atomic_add_unchecked(1, v);
9938+}
9939 #define atomic64_inc(v) atomic64_add(1, v)
9940+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9941+{
9942+ atomic64_add_unchecked(1, v);
9943+}
9944
9945 #define atomic_dec(v) atomic_sub(1, v)
9946+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9947+{
9948+ atomic_sub_unchecked(1, v);
9949+}
9950 #define atomic64_dec(v) atomic64_sub(1, v)
9951+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9952+{
9953+ atomic64_sub_unchecked(1, v);
9954+}
9955
9956 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9957 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9958
9959 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9960+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9961+{
9962+ return cmpxchg(&v->counter, old, new);
9963+}
9964 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9965+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9966+{
9967+ return xchg(&v->counter, new);
9968+}
9969
9970 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9971 {
9972- int c, old;
9973+ int c, old, new;
9974 c = atomic_read(v);
9975 for (;;) {
9976- if (unlikely(c == (u)))
9977+ if (unlikely(c == u))
9978 break;
9979- old = atomic_cmpxchg((v), c, c + (a));
9980+
9981+ asm volatile("addcc %2, %0, %0\n"
9982+
9983+#ifdef CONFIG_PAX_REFCOUNT
9984+ "tvs %%icc, 6\n"
9985+#endif
9986+
9987+ : "=r" (new)
9988+ : "0" (c), "ir" (a)
9989+ : "cc");
9990+
9991+ old = atomic_cmpxchg(v, c, new);
9992 if (likely(old == c))
9993 break;
9994 c = old;
9995@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9996 #define atomic64_cmpxchg(v, o, n) \
9997 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9998 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9999+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10000+{
10001+ return xchg(&v->counter, new);
10002+}
10003
10004 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10005 {
10006- long c, old;
10007+ long c, old, new;
10008 c = atomic64_read(v);
10009 for (;;) {
10010- if (unlikely(c == (u)))
10011+ if (unlikely(c == u))
10012 break;
10013- old = atomic64_cmpxchg((v), c, c + (a));
10014+
10015+ asm volatile("addcc %2, %0, %0\n"
10016+
10017+#ifdef CONFIG_PAX_REFCOUNT
10018+ "tvs %%xcc, 6\n"
10019+#endif
10020+
10021+ : "=r" (new)
10022+ : "0" (c), "ir" (a)
10023+ : "cc");
10024+
10025+ old = atomic64_cmpxchg(v, c, new);
10026 if (likely(old == c))
10027 break;
10028 c = old;
10029 }
10030- return c != (u);
10031+ return c != u;
10032 }
10033
10034 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
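
[Note on the atomic_64.h hunks above: the checked variants lean on sparc condition codes. "addcc" sets the integer overflow flag and "tvs %icc, 6" traps only when a signed add wrapped; the traps_64.c hunks later in this patch route that trap level to pax_report_refcount_overflow(). A portable userland sketch of the same check, not the kernel's code:]

    #include <limits.h>
    #include <stdio.h>

    /* Sketch of addcc + "tvs %icc, 6": detect signed wraparound on an
     * increment.  The kernel traps; this sketch reports and saturates. */
    static int checked_add(int counter, int delta)
    {
            int sum;
            if (__builtin_add_overflow(counter, delta, &sum)) {
                    fprintf(stderr, "refcount overflow detected\n");
                    return INT_MAX;         /* saturate instead of wrapping */
            }
            return sum;
    }

    int main(void)
    {
            printf("%d\n", checked_add(INT_MAX, 1));  /* trips the check */
            return 0;
    }
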
10035diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10036index 7664894..45a974b 100644
10037--- a/arch/sparc/include/asm/barrier_64.h
10038+++ b/arch/sparc/include/asm/barrier_64.h
10039@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10040 do { \
10041 compiletime_assert_atomic_type(*p); \
10042 barrier(); \
10043- ACCESS_ONCE(*p) = (v); \
10044+ ACCESS_ONCE_RW(*p) = (v); \
10045 } while (0)
10046
10047 #define smp_load_acquire(p) \
10048diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10049index 5bb6991..5c2132e 100644
10050--- a/arch/sparc/include/asm/cache.h
10051+++ b/arch/sparc/include/asm/cache.h
10052@@ -7,10 +7,12 @@
10053 #ifndef _SPARC_CACHE_H
10054 #define _SPARC_CACHE_H
10055
10056+#include <linux/const.h>
10057+
10058 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10059
10060 #define L1_CACHE_SHIFT 5
10061-#define L1_CACHE_BYTES 32
10062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10063
10064 #ifdef CONFIG_SPARC32
10065 #define SMP_CACHE_BYTES_SHIFT 5
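
[Note on the cache.h hunk above: rewriting L1_CACHE_BYTES in terms of _AC() from <linux/const.h> keeps one definition valid in both C and assembly. The macro is small enough to reproduce; this is the standard linux/const.h definition:]

    /* In assembly "1UL" is not a valid token, so _AC(1,UL) expands to a
     * bare 1 there and to the suffixed constant 1UL in C. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 5
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)  /* 32, typed UL in C */
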
10066diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10067index a24e41f..47677ff 100644
10068--- a/arch/sparc/include/asm/elf_32.h
10069+++ b/arch/sparc/include/asm/elf_32.h
10070@@ -114,6 +114,13 @@ typedef struct {
10071
10072 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10073
10074+#ifdef CONFIG_PAX_ASLR
10075+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10076+
10077+#define PAX_DELTA_MMAP_LEN 16
10078+#define PAX_DELTA_STACK_LEN 16
10079+#endif
10080+
10081 /* This yields a mask that user programs can use to figure out what
10082 instruction set this cpu supports. This can NOT be done in userspace
10083 on Sparc. */
10084diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10085index 370ca1e..d4f4a98 100644
10086--- a/arch/sparc/include/asm/elf_64.h
10087+++ b/arch/sparc/include/asm/elf_64.h
10088@@ -189,6 +189,13 @@ typedef struct {
10089 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10090 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10091
10092+#ifdef CONFIG_PAX_ASLR
10093+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10094+
10095+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10096+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10097+#endif
10098+
10099 extern unsigned long sparc64_elf_hwcap;
10100 #define ELF_HWCAP sparc64_elf_hwcap
10101
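
[Note on the elf_64.h hunk above: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are the number of random bits folded into the mmap and stack bases. Assuming the usual PaX construction, which lives outside these hunks and is an assumption here, 28 bits on sparc64 with 8 KB pages randomizes the base across a 2^(28+13) = 2 TB window:]

    #include <stdio.h>

    /* Assumed PaX-style construction (not copied from this patch):
     * delta = (random & ((1UL << LEN) - 1)) << PAGE_SHIFT */
    int main(void)
    {
            unsigned long page_shift = 13;      /* sparc64: 8 KB pages */
            unsigned long len = 28;             /* PAX_DELTA_MMAP_LEN  */
            unsigned long rnd = 0x123456789UL;  /* stand-in for RNG    */
            unsigned long delta = (rnd & ((1UL << len) - 1)) << page_shift;

            printf("delta = %#lx, window = 2^%lu bytes\n",
                   delta, len + page_shift);
            return 0;
    }
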
10102diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10103index a3890da..f6a408e 100644
10104--- a/arch/sparc/include/asm/pgalloc_32.h
10105+++ b/arch/sparc/include/asm/pgalloc_32.h
10106@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10107 }
10108
10109 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10110+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10111
10112 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10113 unsigned long address)
10114diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10115index 5e31871..13469c6 100644
10116--- a/arch/sparc/include/asm/pgalloc_64.h
10117+++ b/arch/sparc/include/asm/pgalloc_64.h
10118@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10119 }
10120
10121 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10122+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10123
10124 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10125 {
10126@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10127 }
10128
10129 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10130+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10131
10132 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10133 {
10134diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10135index 59ba6f6..4518128 100644
10136--- a/arch/sparc/include/asm/pgtable.h
10137+++ b/arch/sparc/include/asm/pgtable.h
10138@@ -5,4 +5,8 @@
10139 #else
10140 #include <asm/pgtable_32.h>
10141 #endif
10142+
10143+#define ktla_ktva(addr) (addr)
10144+#define ktva_ktla(addr) (addr)
10145+
10146 #endif
10147diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10148index f06b36a..bca3189 100644
10149--- a/arch/sparc/include/asm/pgtable_32.h
10150+++ b/arch/sparc/include/asm/pgtable_32.h
10151@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10152 #define PAGE_SHARED SRMMU_PAGE_SHARED
10153 #define PAGE_COPY SRMMU_PAGE_COPY
10154 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10155+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10156+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10157+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10158 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10159
10160 /* Top-level page directory - dummy used by init-mm.
10161@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10162
10163 /* xwr */
10164 #define __P000 PAGE_NONE
10165-#define __P001 PAGE_READONLY
10166-#define __P010 PAGE_COPY
10167-#define __P011 PAGE_COPY
10168+#define __P001 PAGE_READONLY_NOEXEC
10169+#define __P010 PAGE_COPY_NOEXEC
10170+#define __P011 PAGE_COPY_NOEXEC
10171 #define __P100 PAGE_READONLY
10172 #define __P101 PAGE_READONLY
10173 #define __P110 PAGE_COPY
10174 #define __P111 PAGE_COPY
10175
10176 #define __S000 PAGE_NONE
10177-#define __S001 PAGE_READONLY
10178-#define __S010 PAGE_SHARED
10179-#define __S011 PAGE_SHARED
10180+#define __S001 PAGE_READONLY_NOEXEC
10181+#define __S010 PAGE_SHARED_NOEXEC
10182+#define __S011 PAGE_SHARED_NOEXEC
10183 #define __S100 PAGE_READONLY
10184 #define __S101 PAGE_READONLY
10185 #define __S110 PAGE_SHARED
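
[Note on the pgtable_32.h hunk above: the __Pxwr/__Sxwr tables are indexed by the mmap protection bits (x = PROT_EXEC, w = PROT_WRITE, r = PROT_READ); the patch gives the rows without PROT_EXEC genuinely non-executable SRMMU protections. A toy decoder of the patched private (copy-on-write) table:]

    #include <stdio.h>

    /* Decodes the patched __P table, indexed by the xwr bits. */
    static const char *p_table(unsigned xwr)
    {
            static const char *n[8] = {
                    "PAGE_NONE",            /* 000 */
                    "PAGE_READONLY_NOEXEC", /* 001 */
                    "PAGE_COPY_NOEXEC",     /* 010 */
                    "PAGE_COPY_NOEXEC",     /* 011 */
                    "PAGE_READONLY",        /* 100 */
                    "PAGE_READONLY",        /* 101 */
                    "PAGE_COPY",            /* 110 */
                    "PAGE_COPY",            /* 111 */
            };
            return n[xwr & 7];
    }

    int main(void)
    {
            printf("PROT_READ|PROT_WRITE -> %s\n", p_table(3)); /* no exec */
            return 0;
    }
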
10186diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10187index ae51a11..eadfd03 100644
10188--- a/arch/sparc/include/asm/pgtsrmmu.h
10189+++ b/arch/sparc/include/asm/pgtsrmmu.h
10190@@ -111,6 +111,11 @@
10191 SRMMU_EXEC | SRMMU_REF)
10192 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10193 SRMMU_EXEC | SRMMU_REF)
10194+
10195+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10196+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10197+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10198+
10199 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10200 SRMMU_DIRTY | SRMMU_REF)
10201
10202diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10203index 29d64b1..4272fe8 100644
10204--- a/arch/sparc/include/asm/setup.h
10205+++ b/arch/sparc/include/asm/setup.h
10206@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10207 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10208
10209 /* init_64.c */
10210-extern atomic_t dcpage_flushes;
10211-extern atomic_t dcpage_flushes_xcall;
10212+extern atomic_unchecked_t dcpage_flushes;
10213+extern atomic_unchecked_t dcpage_flushes_xcall;
10214
10215 extern int sysctl_tsb_ratio;
10216 #endif
10217diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10218index 9689176..63c18ea 100644
10219--- a/arch/sparc/include/asm/spinlock_64.h
10220+++ b/arch/sparc/include/asm/spinlock_64.h
10221@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10222
10223 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10224
10225-static void inline arch_read_lock(arch_rwlock_t *lock)
10226+static inline void arch_read_lock(arch_rwlock_t *lock)
10227 {
10228 unsigned long tmp1, tmp2;
10229
10230 __asm__ __volatile__ (
10231 "1: ldsw [%2], %0\n"
10232 " brlz,pn %0, 2f\n"
10233-"4: add %0, 1, %1\n"
10234+"4: addcc %0, 1, %1\n"
10235+
10236+#ifdef CONFIG_PAX_REFCOUNT
10237+" tvs %%icc, 6\n"
10238+#endif
10239+
10240 " cas [%2], %0, %1\n"
10241 " cmp %0, %1\n"
10242 " bne,pn %%icc, 1b\n"
10243@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10244 " .previous"
10245 : "=&r" (tmp1), "=&r" (tmp2)
10246 : "r" (lock)
10247- : "memory");
10248+ : "memory", "cc");
10249 }
10250
10251-static int inline arch_read_trylock(arch_rwlock_t *lock)
10252+static inline int arch_read_trylock(arch_rwlock_t *lock)
10253 {
10254 int tmp1, tmp2;
10255
10256@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10257 "1: ldsw [%2], %0\n"
10258 " brlz,a,pn %0, 2f\n"
10259 " mov 0, %0\n"
10260-" add %0, 1, %1\n"
10261+" addcc %0, 1, %1\n"
10262+
10263+#ifdef CONFIG_PAX_REFCOUNT
10264+" tvs %%icc, 6\n"
10265+#endif
10266+
10267 " cas [%2], %0, %1\n"
10268 " cmp %0, %1\n"
10269 " bne,pn %%icc, 1b\n"
10270@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10271 return tmp1;
10272 }
10273
10274-static void inline arch_read_unlock(arch_rwlock_t *lock)
10275+static inline void arch_read_unlock(arch_rwlock_t *lock)
10276 {
10277 unsigned long tmp1, tmp2;
10278
10279 __asm__ __volatile__(
10280 "1: lduw [%2], %0\n"
10281-" sub %0, 1, %1\n"
10282+" subcc %0, 1, %1\n"
10283+
10284+#ifdef CONFIG_PAX_REFCOUNT
10285+" tvs %%icc, 6\n"
10286+#endif
10287+
10288 " cas [%2], %0, %1\n"
10289 " cmp %0, %1\n"
10290 " bne,pn %%xcc, 1b\n"
10291@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10292 : "memory");
10293 }
10294
10295-static void inline arch_write_lock(arch_rwlock_t *lock)
10296+static inline void arch_write_lock(arch_rwlock_t *lock)
10297 {
10298 unsigned long mask, tmp1, tmp2;
10299
10300@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10301 : "memory");
10302 }
10303
10304-static void inline arch_write_unlock(arch_rwlock_t *lock)
10305+static inline void arch_write_unlock(arch_rwlock_t *lock)
10306 {
10307 __asm__ __volatile__(
10308 " stw %%g0, [%0]"
10309@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10310 : "memory");
10311 }
10312
10313-static int inline arch_write_trylock(arch_rwlock_t *lock)
10314+static inline int arch_write_trylock(arch_rwlock_t *lock)
10315 {
10316 unsigned long mask, tmp1, tmp2, result;
10317
10318diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10319index fd7bd0a..2e2fa7a 100644
10320--- a/arch/sparc/include/asm/thread_info_32.h
10321+++ b/arch/sparc/include/asm/thread_info_32.h
10322@@ -47,6 +47,7 @@ struct thread_info {
10323 struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
10324 unsigned long rwbuf_stkptrs[NSWINS];
10325 unsigned long w_saved;
10326+ unsigned long lowest_stack;
10327 };
10328
10329 /*
10330diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10331index ff45516..73001ab 100644
10332--- a/arch/sparc/include/asm/thread_info_64.h
10333+++ b/arch/sparc/include/asm/thread_info_64.h
10334@@ -61,6 +61,8 @@ struct thread_info {
10335 struct pt_regs *kern_una_regs;
10336 unsigned int kern_una_insn;
10337
10338+ unsigned long lowest_stack;
10339+
10340 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10341 __attribute__ ((aligned(64)));
10342 };
10343@@ -184,12 +186,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10344 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10345 /* flag bit 4 is available */
10346 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10347-/* flag bit 6 is available */
10348+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10349 #define TIF_32BIT 7 /* 32-bit binary */
10350 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10351 #define TIF_SECCOMP 9 /* secure computing */
10352 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10353 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10354+
10355 /* NOTE: Thread flags >= 12 should be ones we have no interest
10356 * in using in assembly, else we can't use the mask as
10357 * an immediate value in instructions such as andcc.
10358@@ -209,12 +212,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10359 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10360 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10361 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10362+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10363
10364 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10365 _TIF_DO_NOTIFY_RESUME_MASK | \
10366 _TIF_NEED_RESCHED)
10367 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10368
10369+#define _TIF_WORK_SYSCALL \
10370+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10371+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10372+
10373 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10374
10375 /*
10376diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10377index bd56c28..4b63d83 100644
10378--- a/arch/sparc/include/asm/uaccess.h
10379+++ b/arch/sparc/include/asm/uaccess.h
10380@@ -1,5 +1,6 @@
10381 #ifndef ___ASM_SPARC_UACCESS_H
10382 #define ___ASM_SPARC_UACCESS_H
10383+
10384 #if defined(__sparc__) && defined(__arch64__)
10385 #include <asm/uaccess_64.h>
10386 #else
10387diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10388index 64ee103..388aef0 100644
10389--- a/arch/sparc/include/asm/uaccess_32.h
10390+++ b/arch/sparc/include/asm/uaccess_32.h
10391@@ -47,6 +47,7 @@
10392 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10393 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10394 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
10395+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10396 #define access_ok(type, addr, size) \
10397 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10398
10399@@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10400
10401 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10402 {
10403- if (n && __access_ok((unsigned long) to, n))
10404+ if ((long)n < 0)
10405+ return n;
10406+
10407+ if (n && __access_ok((unsigned long) to, n)) {
10408+ if (!__builtin_constant_p(n))
10409+ check_object_size(from, n, true);
10410 return __copy_user(to, (__force void __user *) from, n);
10411- else
10412+ } else
10413 return n;
10414 }
10415
10416 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10417 {
10418+ if ((long)n < 0)
10419+ return n;
10420+
10421+ if (!__builtin_constant_p(n))
10422+ check_object_size(from, n, true);
10423+
10424 return __copy_user(to, (__force void __user *) from, n);
10425 }
10426
10427 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10428 {
10429- if (n && __access_ok((unsigned long) from, n))
10430+ if ((long)n < 0)
10431+ return n;
10432+
10433+ if (n && __access_ok((unsigned long) from, n)) {
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(to, n, false);
10436 return __copy_user((__force void __user *) to, from, n);
10437- else
10438+ } else
10439 return n;
10440 }
10441
10442 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10443 {
10444+ if ((long)n < 0)
10445+ return n;
10446+
10447 return __copy_user((__force void __user *) to, from, n);
10448 }
10449
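
[Note on the uaccess_32.h hunks above: two independent hardening checks are layered onto the copy routines. The "(long)n < 0" test rejects lengths with the sign bit set, which in practice come from underflowed size calculations, and check_object_size() is the PAX_USERCOPY hook (defined elsewhere in the patch) that validates the kernel-side buffer. A toy model of the length guard; recall that these routines return the number of bytes *not* copied, so returning n signals total failure:]

    #include <stdio.h>

    static unsigned long guard_len(unsigned long n)
    {
            if ((long)n < 0)
                    return n;       /* reject: caller sees nothing copied */
            return 0;               /* 0 = all bytes copied in this toy   */
    }

    int main(void)
    {
            printf("%lu\n", guard_len((unsigned long)-4));  /* rejected */
            printf("%lu\n", guard_len(64));                 /* accepted */
            return 0;
    }
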
10450diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10451index a35194b..47dabc0d 100644
10452--- a/arch/sparc/include/asm/uaccess_64.h
10453+++ b/arch/sparc/include/asm/uaccess_64.h
10454@@ -10,6 +10,7 @@
10455 #include <linux/compiler.h>
10456 #include <linux/string.h>
10457 #include <linux/thread_info.h>
10458+#include <linux/kernel.h>
10459 #include <asm/asi.h>
10460 #include <asm/spitfire.h>
10461 #include <asm-generic/uaccess-unaligned.h>
10462@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10463 return 1;
10464 }
10465
10466+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10467+{
10468+ return 1;
10469+}
10470+
10471 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10472 {
10473 return 1;
10474@@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10475 static inline unsigned long __must_check
10476 copy_from_user(void *to, const void __user *from, unsigned long size)
10477 {
10478- unsigned long ret = ___copy_from_user(to, from, size);
10479+ unsigned long ret;
10480
10481+ if ((long)size < 0 || size > INT_MAX)
10482+ return size;
10483+
10484+ if (!__builtin_constant_p(size))
10485+ check_object_size(to, size, false);
10486+
10487+ ret = ___copy_from_user(to, from, size);
10488 if (unlikely(ret))
10489 ret = copy_from_user_fixup(to, from, size);
10490
10491@@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10492 static inline unsigned long __must_check
10493 copy_to_user(void __user *to, const void *from, unsigned long size)
10494 {
10495- unsigned long ret = ___copy_to_user(to, from, size);
10496+ unsigned long ret;
10497
10498+ if ((long)size < 0 || size > INT_MAX)
10499+ return size;
10500+
10501+ if (!__builtin_constant_p(size))
10502+ check_object_size(from, size, true);
10503+
10504+ ret = ___copy_to_user(to, from, size);
10505 if (unlikely(ret))
10506 ret = copy_to_user_fixup(to, from, size);
10507 return ret;
10508diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10509index 7cf9c6e..6206648 100644
10510--- a/arch/sparc/kernel/Makefile
10511+++ b/arch/sparc/kernel/Makefile
10512@@ -4,7 +4,7 @@
10513 #
10514
10515 asflags-y := -ansi
10516-ccflags-y := -Werror
10517+#ccflags-y := -Werror
10518
10519 extra-y := head_$(BITS).o
10520
10521diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10522index 50e7b62..79fae35 100644
10523--- a/arch/sparc/kernel/process_32.c
10524+++ b/arch/sparc/kernel/process_32.c
10525@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10526
10527 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10528 r->psr, r->pc, r->npc, r->y, print_tainted());
10529- printk("PC: <%pS>\n", (void *) r->pc);
10530+ printk("PC: <%pA>\n", (void *) r->pc);
10531 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10532 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10533 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10534 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10535 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10536 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10537- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10538+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10539
10540 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10541 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10542@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10543 rw = (struct reg_window32 *) fp;
10544 pc = rw->ins[7];
10545 printk("[%08lx : ", pc);
10546- printk("%pS ] ", (void *) pc);
10547+ printk("%pA ] ", (void *) pc);
10548 fp = rw->ins[6];
10549 } while (++count < 16);
10550 printk("\n");
10551diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10552index 46a5964..a35c62c 100644
10553--- a/arch/sparc/kernel/process_64.c
10554+++ b/arch/sparc/kernel/process_64.c
10555@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10556 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10557 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10558 if (regs->tstate & TSTATE_PRIV)
10559- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10560+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10561 }
10562
10563 void show_regs(struct pt_regs *regs)
10564@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10565
10566 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10567 regs->tpc, regs->tnpc, regs->y, print_tainted());
10568- printk("TPC: <%pS>\n", (void *) regs->tpc);
10569+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10570 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10571 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10572 regs->u_regs[3]);
10573@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10574 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10575 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10576 regs->u_regs[15]);
10577- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10578+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10579 show_regwindow(regs);
10580 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10581 }
10582@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10583 ((tp && tp->task) ? tp->task->pid : -1));
10584
10585 if (gp->tstate & TSTATE_PRIV) {
10586- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10587+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10588 (void *) gp->tpc,
10589 (void *) gp->o7,
10590 (void *) gp->i7,
10591diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10592index 79cc0d1..ec62734 100644
10593--- a/arch/sparc/kernel/prom_common.c
10594+++ b/arch/sparc/kernel/prom_common.c
10595@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10596
10597 unsigned int prom_early_allocated __initdata;
10598
10599-static struct of_pdt_ops prom_sparc_ops __initdata = {
10600+static struct of_pdt_ops prom_sparc_ops __initconst = {
10601 .nextprop = prom_common_nextprop,
10602 .getproplen = prom_getproplen,
10603 .getproperty = prom_getproperty,
10604diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10605index 9ddc492..27a5619 100644
10606--- a/arch/sparc/kernel/ptrace_64.c
10607+++ b/arch/sparc/kernel/ptrace_64.c
10608@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10609 return ret;
10610 }
10611
10612+#ifdef CONFIG_GRKERNSEC_SETXID
10613+extern void gr_delayed_cred_worker(void);
10614+#endif
10615+
10616 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10617 {
10618 int ret = 0;
10619@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10620 if (test_thread_flag(TIF_NOHZ))
10621 user_exit();
10622
10623+#ifdef CONFIG_GRKERNSEC_SETXID
10624+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10625+ gr_delayed_cred_worker();
10626+#endif
10627+
10628 if (test_thread_flag(TIF_SYSCALL_TRACE))
10629 ret = tracehook_report_syscall_entry(regs);
10630
10631@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10632 if (test_thread_flag(TIF_NOHZ))
10633 user_exit();
10634
10635+#ifdef CONFIG_GRKERNSEC_SETXID
10636+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10637+ gr_delayed_cred_worker();
10638+#endif
10639+
10640 audit_syscall_exit(regs);
10641
10642 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
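
[Note on the ptrace_64.c hunks above: TIF_GRSEC_SETXID implements deferred credential rewriting. setuid() and friends must take effect in every thread of a process, so the initiating thread flags its siblings and each one applies the pending change via gr_delayed_cred_worker() at its next syscall boundary, where it is safe to do so. A stripped-down single-threaded model of the pattern; everything here except the flag name is invented for illustration:]

    #include <stdio.h>

    static int tif_grsec_setxid;           /* a per-thread flag in reality */

    static void delayed_cred_worker(void)  /* stands in for the gr_ hook   */
    {
            printf("applying pending uid/gid change\n");
    }

    static void syscall_boundary(void)     /* mirrors entry + exit hunks   */
    {
            if (tif_grsec_setxid) {        /* test_and_clear in the patch  */
                    tif_grsec_setxid = 0;
                    delayed_cred_worker();
            }
    }

    int main(void)
    {
            tif_grsec_setxid = 1;          /* "another thread ran setuid()" */
            syscall_boundary();
            return 0;
    }
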
10643diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10644index 61139d9..c1a5f28 100644
10645--- a/arch/sparc/kernel/smp_64.c
10646+++ b/arch/sparc/kernel/smp_64.c
10647@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10648 return;
10649
10650 #ifdef CONFIG_DEBUG_DCFLUSH
10651- atomic_inc(&dcpage_flushes);
10652+ atomic_inc_unchecked(&dcpage_flushes);
10653 #endif
10654
10655 this_cpu = get_cpu();
10656@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10657 xcall_deliver(data0, __pa(pg_addr),
10658 (u64) pg_addr, cpumask_of(cpu));
10659 #ifdef CONFIG_DEBUG_DCFLUSH
10660- atomic_inc(&dcpage_flushes_xcall);
10661+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10662 #endif
10663 }
10664 }
10665@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10666 preempt_disable();
10667
10668 #ifdef CONFIG_DEBUG_DCFLUSH
10669- atomic_inc(&dcpage_flushes);
10670+ atomic_inc_unchecked(&dcpage_flushes);
10671 #endif
10672 data0 = 0;
10673 pg_addr = page_address(page);
10674@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10675 xcall_deliver(data0, __pa(pg_addr),
10676 (u64) pg_addr, cpu_online_mask);
10677 #ifdef CONFIG_DEBUG_DCFLUSH
10678- atomic_inc(&dcpage_flushes_xcall);
10679+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10680 #endif
10681 }
10682 __local_flush_dcache_page(page);
10683diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10684index 646988d..b88905f 100644
10685--- a/arch/sparc/kernel/sys_sparc_32.c
10686+++ b/arch/sparc/kernel/sys_sparc_32.c
10687@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10688 if (len > TASK_SIZE - PAGE_SIZE)
10689 return -ENOMEM;
10690 if (!addr)
10691- addr = TASK_UNMAPPED_BASE;
10692+ addr = current->mm->mmap_base;
10693
10694 info.flags = 0;
10695 info.length = len;
10696diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10697index 30e7ddb..266a3b0 100644
10698--- a/arch/sparc/kernel/sys_sparc_64.c
10699+++ b/arch/sparc/kernel/sys_sparc_64.c
10700@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10701 struct vm_area_struct * vma;
10702 unsigned long task_size = TASK_SIZE;
10703 int do_color_align;
10704+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10705 struct vm_unmapped_area_info info;
10706
10707 if (flags & MAP_FIXED) {
10708 /* We do not accept a shared mapping if it would violate
10709 * cache aliasing constraints.
10710 */
10711- if ((flags & MAP_SHARED) &&
10712+ if ((filp || (flags & MAP_SHARED)) &&
10713 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10714 return -EINVAL;
10715 return addr;
10716@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10717 if (filp || (flags & MAP_SHARED))
10718 do_color_align = 1;
10719
10720+#ifdef CONFIG_PAX_RANDMMAP
10721+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10722+#endif
10723+
10724 if (addr) {
10725 if (do_color_align)
10726 addr = COLOR_ALIGN(addr, pgoff);
10727@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10728 addr = PAGE_ALIGN(addr);
10729
10730 vma = find_vma(mm, addr);
10731- if (task_size - len >= addr &&
10732- (!vma || addr + len <= vma->vm_start))
10733+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10734 return addr;
10735 }
10736
10737 info.flags = 0;
10738 info.length = len;
10739- info.low_limit = TASK_UNMAPPED_BASE;
10740+ info.low_limit = mm->mmap_base;
10741 info.high_limit = min(task_size, VA_EXCLUDE_START);
10742 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10743 info.align_offset = pgoff << PAGE_SHIFT;
10744+ info.threadstack_offset = offset;
10745 addr = vm_unmapped_area(&info);
10746
10747 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10748 VM_BUG_ON(addr != -ENOMEM);
10749 info.low_limit = VA_EXCLUDE_END;
10750+
10751+#ifdef CONFIG_PAX_RANDMMAP
10752+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10753+ info.low_limit += mm->delta_mmap;
10754+#endif
10755+
10756 info.high_limit = task_size;
10757 addr = vm_unmapped_area(&info);
10758 }
10759@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10760 unsigned long task_size = STACK_TOP32;
10761 unsigned long addr = addr0;
10762 int do_color_align;
10763+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10764 struct vm_unmapped_area_info info;
10765
10766 /* This should only ever run for 32-bit processes. */
10767@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10768 /* We do not accept a shared mapping if it would violate
10769 * cache aliasing constraints.
10770 */
10771- if ((flags & MAP_SHARED) &&
10772+ if ((filp || (flags & MAP_SHARED)) &&
10773 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10774 return -EINVAL;
10775 return addr;
10776@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10777 if (filp || (flags & MAP_SHARED))
10778 do_color_align = 1;
10779
10780+#ifdef CONFIG_PAX_RANDMMAP
10781+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10782+#endif
10783+
10784 /* requesting a specific address */
10785 if (addr) {
10786 if (do_color_align)
10787@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10788 addr = PAGE_ALIGN(addr);
10789
10790 vma = find_vma(mm, addr);
10791- if (task_size - len >= addr &&
10792- (!vma || addr + len <= vma->vm_start))
10793+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10794 return addr;
10795 }
10796
10797@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10798 info.high_limit = mm->mmap_base;
10799 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10800 info.align_offset = pgoff << PAGE_SHIFT;
10801+ info.threadstack_offset = offset;
10802 addr = vm_unmapped_area(&info);
10803
10804 /*
10805@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10806 VM_BUG_ON(addr != -ENOMEM);
10807 info.flags = 0;
10808 info.low_limit = TASK_UNMAPPED_BASE;
10809+
10810+#ifdef CONFIG_PAX_RANDMMAP
10811+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10812+ info.low_limit += mm->delta_mmap;
10813+#endif
10814+
10815 info.high_limit = STACK_TOP32;
10816 addr = vm_unmapped_area(&info);
10817 }
10818@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10819 EXPORT_SYMBOL(get_fb_unmapped_area);
10820
10821 /* Essentially the same as PowerPC. */
10822-static unsigned long mmap_rnd(void)
10823+static unsigned long mmap_rnd(struct mm_struct *mm)
10824 {
10825 unsigned long rnd = 0UL;
10826
10827+#ifdef CONFIG_PAX_RANDMMAP
10828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10829+#endif
10830+
10831 if (current->flags & PF_RANDOMIZE) {
10832 unsigned long val = get_random_int();
10833 if (test_thread_flag(TIF_32BIT))
10834@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10835
10836 void arch_pick_mmap_layout(struct mm_struct *mm)
10837 {
10838- unsigned long random_factor = mmap_rnd();
10839+ unsigned long random_factor = mmap_rnd(mm);
10840 unsigned long gap;
10841
10842 /*
10843@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10844 gap == RLIM_INFINITY ||
10845 sysctl_legacy_va_layout) {
10846 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10847+
10848+#ifdef CONFIG_PAX_RANDMMAP
10849+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10850+ mm->mmap_base += mm->delta_mmap;
10851+#endif
10852+
10853 mm->get_unmapped_area = arch_get_unmapped_area;
10854 } else {
10855 /* We know it's 32-bit */
10856@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10857 gap = (task_size / 6 * 5);
10858
10859 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10860+
10861+#ifdef CONFIG_PAX_RANDMMAP
10862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10863+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10864+#endif
10865+
10866 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10867 }
10868 }
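
[Note on the sys_sparc_64.c hunks above: the MAP_FIXED checks are tightened from MAP_SHARED-only to any file-backed request, so addr and the file offset must land on the same D-cache colour within an SHMLBA window. The check is plain modular arithmetic; SHMLBA's value below is illustrative, not taken from the patch:]

    #include <stdio.h>

    #define PAGE_SHIFT 13UL
    #define SHMLBA (16UL * 1024)   /* illustrative alignment value only */

    /* addr and pgoff<<PAGE_SHIFT must agree modulo SHMLBA, or the same
     * file page could alias in two differently-coloured cache lines. */
    static int colour_ok(unsigned long addr, unsigned long pgoff)
    {
            return ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0;
    }

    int main(void)
    {
            printf("%d %d\n", colour_ok(0x40000000UL, 0),
                              colour_ok(0x40002000UL, 0));
            return 0;
    }
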
10869diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10870index bb00089..e0ea580 100644
10871--- a/arch/sparc/kernel/syscalls.S
10872+++ b/arch/sparc/kernel/syscalls.S
10873@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10874 #endif
10875 .align 32
10876 1: ldx [%g6 + TI_FLAGS], %l5
10877- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10878+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10879 be,pt %icc, rtrap
10880 nop
10881 call syscall_trace_leave
10882@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10883
10884 srl %i3, 0, %o3 ! IEU0
10885 srl %i2, 0, %o2 ! IEU0 Group
10886- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10887+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10888 bne,pn %icc, linux_syscall_trace32 ! CTI
10889 mov %i0, %l5 ! IEU1
10890 5: call %l7 ! CTI Group brk forced
10891@@ -218,7 +218,7 @@ linux_sparc_syscall:
10892
10893 mov %i3, %o3 ! IEU1
10894 mov %i4, %o4 ! IEU0 Group
10895- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10896+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10897 bne,pn %icc, linux_syscall_trace ! CTI Group
10898 mov %i0, %l5 ! IEU0
10899 2: call %l7 ! CTI Group brk forced
10900@@ -233,7 +233,7 @@ ret_sys_call:
10901
10902 cmp %o0, -ERESTART_RESTARTBLOCK
10903 bgeu,pn %xcc, 1f
10904- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10905+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10906 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10907
10908 2:
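
[Note on the syscalls.S hunks above: _TIF_WORK_SYSCALL, defined in the thread_info_64.h hunk earlier, folds the five masks previously OR'd inline at each andcc site plus the new _TIF_GRSEC_SETXID into one name, so the assembly and C sides cannot drift apart. Keeping all of these flags below bit 12 matters because sparc's andcc takes a 13-bit signed immediate (simm13). A quick check that the combined mask still fits:]

    #include <stdio.h>

    enum { TIF_SYSCALL_TRACE = 0, TIF_GRSEC_SETXID = 6, TIF_NOHZ = 8,
           TIF_SECCOMP = 9, TIF_SYSCALL_AUDIT = 10,
           TIF_SYSCALL_TRACEPOINT = 11 };

    int main(void)
    {
            unsigned m = (1u << TIF_SYSCALL_TRACE) | (1u << TIF_SECCOMP) |
                         (1u << TIF_SYSCALL_AUDIT) |
                         (1u << TIF_SYSCALL_TRACEPOINT) |
                         (1u << TIF_NOHZ) | (1u << TIF_GRSEC_SETXID);
            printf("_TIF_WORK_SYSCALL = %#x\n", m);  /* 0xf41 < 4096: simm13 ok */
            return 0;
    }
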
10909diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10910index 6fd386c5..6907d81 100644
10911--- a/arch/sparc/kernel/traps_32.c
10912+++ b/arch/sparc/kernel/traps_32.c
10913@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10914 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10915 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10916
10917+extern void gr_handle_kernel_exploit(void);
10918+
10919 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10920 {
10921 static int die_counter;
10922@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10923 count++ < 30 &&
10924 (((unsigned long) rw) >= PAGE_OFFSET) &&
10925 !(((unsigned long) rw) & 0x7)) {
10926- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10927+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10928 (void *) rw->ins[7]);
10929 rw = (struct reg_window32 *)rw->ins[6];
10930 }
10931 }
10932 printk("Instruction DUMP:");
10933 instruction_dump ((unsigned long *) regs->pc);
10934- if(regs->psr & PSR_PS)
10935+ if(regs->psr & PSR_PS) {
10936+ gr_handle_kernel_exploit();
10937 do_exit(SIGKILL);
10938+ }
10939 do_exit(SIGSEGV);
10940 }
10941
10942diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10943index 0e69974..0c15a6e 100644
10944--- a/arch/sparc/kernel/traps_64.c
10945+++ b/arch/sparc/kernel/traps_64.c
10946@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10947 i + 1,
10948 p->trapstack[i].tstate, p->trapstack[i].tpc,
10949 p->trapstack[i].tnpc, p->trapstack[i].tt);
10950- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10951+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10952 }
10953 }
10954
10955@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10956
10957 lvl -= 0x100;
10958 if (regs->tstate & TSTATE_PRIV) {
10959+
10960+#ifdef CONFIG_PAX_REFCOUNT
10961+ if (lvl == 6)
10962+ pax_report_refcount_overflow(regs);
10963+#endif
10964+
10965 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10966 die_if_kernel(buffer, regs);
10967 }
10968@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10969 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10970 {
10971 char buffer[32];
10972-
10973+
10974 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10975 0, lvl, SIGTRAP) == NOTIFY_STOP)
10976 return;
10977
10978+#ifdef CONFIG_PAX_REFCOUNT
10979+ if (lvl == 6)
10980+ pax_report_refcount_overflow(regs);
10981+#endif
10982+
10983 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10984
10985 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10986@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10987 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10988 printk("%s" "ERROR(%d): ",
10989 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10990- printk("TPC<%pS>\n", (void *) regs->tpc);
10991+ printk("TPC<%pA>\n", (void *) regs->tpc);
10992 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10993 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10994 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10995@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10996 smp_processor_id(),
10997 (type & 0x1) ? 'I' : 'D',
10998 regs->tpc);
10999- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11000+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11001 panic("Irrecoverable Cheetah+ parity error.");
11002 }
11003
11004@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11005 smp_processor_id(),
11006 (type & 0x1) ? 'I' : 'D',
11007 regs->tpc);
11008- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11009+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11010 }
11011
11012 struct sun4v_error_entry {
11013@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11014 /*0x38*/u64 reserved_5;
11015 };
11016
11017-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11018-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11019+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11020+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11021
11022 static const char *sun4v_err_type_to_str(u8 type)
11023 {
11024@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11025 }
11026
11027 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11028- int cpu, const char *pfx, atomic_t *ocnt)
11029+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11030 {
11031 u64 *raw_ptr = (u64 *) ent;
11032 u32 attrs;
11033@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11034
11035 show_regs(regs);
11036
11037- if ((cnt = atomic_read(ocnt)) != 0) {
11038- atomic_set(ocnt, 0);
11039+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11040+ atomic_set_unchecked(ocnt, 0);
11041 wmb();
11042 printk("%s: Queue overflowed %d times.\n",
11043 pfx, cnt);
11044@@ -2048,7 +2059,7 @@ out:
11045 */
11046 void sun4v_resum_overflow(struct pt_regs *regs)
11047 {
11048- atomic_inc(&sun4v_resum_oflow_cnt);
11049+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11050 }
11051
11052 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11053@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11054 /* XXX Actually even this can make not that much sense. Perhaps
11055 * XXX we should just pull the plug and panic directly from here?
11056 */
11057- atomic_inc(&sun4v_nonresum_oflow_cnt);
11058+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11059 }
11060
11061 static void sun4v_tlb_error(struct pt_regs *regs)
11062@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11063
11064 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11065 regs->tpc, tl);
11066- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11067+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11068 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11069- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11070+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11071 (void *) regs->u_regs[UREG_I7]);
11072 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11073 "pte[%lx] error[%lx]\n",
11074@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11075
11076 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11077 regs->tpc, tl);
11078- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11079+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11080 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11081- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11082+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11083 (void *) regs->u_regs[UREG_I7]);
11084 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11085 "pte[%lx] error[%lx]\n",
11086@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11087 fp = (unsigned long)sf->fp + STACK_BIAS;
11088 }
11089
11090- printk(" [%016lx] %pS\n", pc, (void *) pc);
11091+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11092 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11093 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11094 int index = tsk->curr_ret_stack;
11095 if (tsk->ret_stack && index >= graph) {
11096 pc = tsk->ret_stack[index - graph].ret;
11097- printk(" [%016lx] %pS\n", pc, (void *) pc);
11098+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11099 graph++;
11100 }
11101 }
11102@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11103 return (struct reg_window *) (fp + STACK_BIAS);
11104 }
11105
11106+extern void gr_handle_kernel_exploit(void);
11107+
11108 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11109 {
11110 static int die_counter;
11111@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11112 while (rw &&
11113 count++ < 30 &&
11114 kstack_valid(tp, (unsigned long) rw)) {
11115- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11116+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11117 (void *) rw->ins[7]);
11118
11119 rw = kernel_stack_up(rw);
11120@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11121 }
11122 if (panic_on_oops)
11123 panic("Fatal exception");
11124- if (regs->tstate & TSTATE_PRIV)
11125+ if (regs->tstate & TSTATE_PRIV) {
11126+ gr_handle_kernel_exploit();
11127 do_exit(SIGKILL);
11128+ }
11129 do_exit(SIGSEGV);
11130 }
11131 EXPORT_SYMBOL(die_if_kernel);
11132diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11133index 62098a8..547ab2c 100644
11134--- a/arch/sparc/kernel/unaligned_64.c
11135+++ b/arch/sparc/kernel/unaligned_64.c
11136@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11137 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11138
11139 if (__ratelimit(&ratelimit)) {
11140- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11141+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11142 regs->tpc, (void *) regs->tpc);
11143 }
11144 }
11145diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11146index 3269b02..64f5231 100644
11147--- a/arch/sparc/lib/Makefile
11148+++ b/arch/sparc/lib/Makefile
11149@@ -2,7 +2,7 @@
11150 #
11151
11152 asflags-y := -ansi -DST_DIV0=0x02
11153-ccflags-y := -Werror
11154+#ccflags-y := -Werror
11155
11156 lib-$(CONFIG_SPARC32) += ashrdi3.o
11157 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11158diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11159index 05dac43..76f8ed4 100644
11160--- a/arch/sparc/lib/atomic_64.S
11161+++ b/arch/sparc/lib/atomic_64.S
11162@@ -15,11 +15,22 @@
11163 * a value and does the barriers.
11164 */
11165
11166-#define ATOMIC_OP(op) \
11167-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11168+#ifdef CONFIG_PAX_REFCOUNT
11169+#define __REFCOUNT_OP(op) op##cc
11170+#define __OVERFLOW_IOP tvs %icc, 6;
11171+#define __OVERFLOW_XOP tvs %xcc, 6;
11172+#else
11173+#define __REFCOUNT_OP(op) op
11174+#define __OVERFLOW_IOP
11175+#define __OVERFLOW_XOP
11176+#endif
11177+
11178+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11179+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11180 BACKOFF_SETUP(%o2); \
11181 1: lduw [%o1], %g1; \
11182- op %g1, %o0, %g7; \
11183+ asm_op %g1, %o0, %g7; \
11184+ post_op \
11185 cas [%o1], %g1, %g7; \
11186 cmp %g1, %g7; \
11187 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11188@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11189 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11190 ENDPROC(atomic_##op); \
11191
11192-#define ATOMIC_OP_RETURN(op) \
11193-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11194+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11195+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11196+
11197+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11198+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11199 BACKOFF_SETUP(%o2); \
11200 1: lduw [%o1], %g1; \
11201- op %g1, %o0, %g7; \
11202+ asm_op %g1, %o0, %g7; \
11203+ post_op \
11204 cas [%o1], %g1, %g7; \
11205 cmp %g1, %g7; \
11206 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11207@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11208 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11209 ENDPROC(atomic_##op##_return);
11210
11211+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11212+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11213+
11214 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11215
11216 ATOMIC_OPS(add)
11217@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11218
11219 #undef ATOMIC_OPS
11220 #undef ATOMIC_OP_RETURN
11221+#undef __ATOMIC_OP_RETURN
11222 #undef ATOMIC_OP
11223+#undef __ATOMIC_OP
11224
11225-#define ATOMIC64_OP(op) \
11226-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11227+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11228+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11229 BACKOFF_SETUP(%o2); \
11230 1: ldx [%o1], %g1; \
11231- op %g1, %o0, %g7; \
11232+ asm_op %g1, %o0, %g7; \
11233+ post_op \
11234 casx [%o1], %g1, %g7; \
11235 cmp %g1, %g7; \
11236 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11237@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11238 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11239 ENDPROC(atomic64_##op); \
11240
11241-#define ATOMIC64_OP_RETURN(op) \
11242-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11243+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11244+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11245+
11246+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11247+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11248 BACKOFF_SETUP(%o2); \
11249 1: ldx [%o1], %g1; \
11250- op %g1, %o0, %g7; \
11251+ asm_op %g1, %o0, %g7; \
11252+ post_op \
11253 casx [%o1], %g1, %g7; \
11254 cmp %g1, %g7; \
11255 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11256@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11257 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11258 ENDPROC(atomic64_##op##_return);
11259
11260+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11261+	__ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11262+
11263 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11264
11265 ATOMIC64_OPS(add)
11266@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11267
11268 #undef ATOMIC64_OPS
11269 #undef ATOMIC64_OP_RETURN
11270+#undef __ATOMIC64_OP_RETURN
11271 #undef ATOMIC64_OP
11272+#undef __ATOMIC64_OP
11273+#undef __OVERFLOW_XOP
11274+#undef __OVERFLOW_IOP
11275+#undef __REFCOUNT_OP
11276
11277 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11278 BACKOFF_SETUP(%o2)
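
[Note on the atomic_64.S hunks above: each __ATOMIC*_OP macro now stamps out two entry points per operation, a checked one ("opcc" plus the overflow trap) and a plain _unchecked one for counters that are allowed to wrap. A C analogue of the stamping pattern, using __builtin_add_overflow where the kernel uses addcc/tvs:]

    #include <stdio.h>

    /* One macro generates both variants, as __ATOMIC_OP(op, suffix) does. */
    #define DEFINE_ADD(suffix, CHECK)                              \
    static int add##suffix(int a, int b)                           \
    {                                                              \
            int r;                                                 \
            int ovf = __builtin_add_overflow(a, b, &r);            \
            if (CHECK && ovf)                                      \
                    return -1;  /* the kernel traps (tvs) instead */\
            return r;                                              \
    }
    DEFINE_ADD(, 1)             /* checked:   add()           */
    DEFINE_ADD(_unchecked, 0)   /* plain:     add_unchecked() */

    int main(void)
    {
            printf("%d %d\n", add(2, 3), add_unchecked(2, 3));
            return 0;
    }
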
11279diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11280index 1d649a9..fbc5bfc 100644
11281--- a/arch/sparc/lib/ksyms.c
11282+++ b/arch/sparc/lib/ksyms.c
11283@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11284 /* Atomic counter implementation. */
11285 #define ATOMIC_OP(op) \
11286 EXPORT_SYMBOL(atomic_##op); \
11287-EXPORT_SYMBOL(atomic64_##op);
11288+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11289+EXPORT_SYMBOL(atomic64_##op); \
11290+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11291
11292 #define ATOMIC_OP_RETURN(op) \
11293 EXPORT_SYMBOL(atomic_##op##_return); \
11294@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11295 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11296
11297 ATOMIC_OPS(add)
11298+EXPORT_SYMBOL(atomic_add_return_unchecked);
11299+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11300 ATOMIC_OPS(sub)
11301
11302 #undef ATOMIC_OPS
11303diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11304index 30c3ecc..736f015 100644
11305--- a/arch/sparc/mm/Makefile
11306+++ b/arch/sparc/mm/Makefile
11307@@ -2,7 +2,7 @@
11308 #
11309
11310 asflags-y := -ansi
11311-ccflags-y := -Werror
11312+#ccflags-y := -Werror
11313
11314 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11315 obj-y += fault_$(BITS).o
11316diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11317index 70d8171..274c6c0 100644
11318--- a/arch/sparc/mm/fault_32.c
11319+++ b/arch/sparc/mm/fault_32.c
11320@@ -21,6 +21,9 @@
11321 #include <linux/perf_event.h>
11322 #include <linux/interrupt.h>
11323 #include <linux/kdebug.h>
11324+#include <linux/slab.h>
11325+#include <linux/pagemap.h>
11326+#include <linux/compiler.h>
11327
11328 #include <asm/page.h>
11329 #include <asm/pgtable.h>
11330@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11331 return safe_compute_effective_address(regs, insn);
11332 }
11333
11334+#ifdef CONFIG_PAX_PAGEEXEC
11335+#ifdef CONFIG_PAX_DLRESOLVE
11336+static void pax_emuplt_close(struct vm_area_struct *vma)
11337+{
11338+ vma->vm_mm->call_dl_resolve = 0UL;
11339+}
11340+
11341+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11342+{
11343+ unsigned int *kaddr;
11344+
11345+ vmf->page = alloc_page(GFP_HIGHUSER);
11346+ if (!vmf->page)
11347+ return VM_FAULT_OOM;
11348+
11349+ kaddr = kmap(vmf->page);
11350+ memset(kaddr, 0, PAGE_SIZE);
11351+ kaddr[0] = 0x9DE3BFA8U; /* save */
11352+ flush_dcache_page(vmf->page);
11353+ kunmap(vmf->page);
11354+ return VM_FAULT_MAJOR;
11355+}
11356+
11357+static const struct vm_operations_struct pax_vm_ops = {
11358+ .close = pax_emuplt_close,
11359+ .fault = pax_emuplt_fault
11360+};
11361+
11362+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11363+{
11364+ int ret;
11365+
11366+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11367+ vma->vm_mm = current->mm;
11368+ vma->vm_start = addr;
11369+ vma->vm_end = addr + PAGE_SIZE;
11370+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11371+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11372+ vma->vm_ops = &pax_vm_ops;
11373+
11374+ ret = insert_vm_struct(current->mm, vma);
11375+ if (ret)
11376+ return ret;
11377+
11378+ ++current->mm->total_vm;
11379+ return 0;
11380+}
11381+#endif
11382+
11383+/*
11384+ * PaX: decide what to do with offenders (regs->pc = fault address)
11385+ *
11386+ * returns 1 when task should be killed
11387+ * 2 when patched PLT trampoline was detected
11388+ * 3 when unpatched PLT trampoline was detected
11389+ */
11390+static int pax_handle_fetch_fault(struct pt_regs *regs)
11391+{
11392+
11393+#ifdef CONFIG_PAX_EMUPLT
11394+ int err;
11395+
11396+ do { /* PaX: patched PLT emulation #1 */
11397+ unsigned int sethi1, sethi2, jmpl;
11398+
11399+ err = get_user(sethi1, (unsigned int *)regs->pc);
11400+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11401+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11402+
11403+ if (err)
11404+ break;
11405+
11406+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11407+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11408+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11409+ {
11410+ unsigned int addr;
11411+
11412+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11413+ addr = regs->u_regs[UREG_G1];
11414+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11415+ regs->pc = addr;
11416+ regs->npc = addr+4;
11417+ return 2;
11418+ }
11419+ } while (0);
11420+
11421+ do { /* PaX: patched PLT emulation #2 */
11422+ unsigned int ba;
11423+
11424+ err = get_user(ba, (unsigned int *)regs->pc);
11425+
11426+ if (err)
11427+ break;
11428+
11429+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11430+ unsigned int addr;
11431+
11432+ if ((ba & 0xFFC00000U) == 0x30800000U)
11433+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11434+ else
11435+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11436+ regs->pc = addr;
11437+ regs->npc = addr+4;
11438+ return 2;
11439+ }
11440+ } while (0);
11441+
11442+ do { /* PaX: patched PLT emulation #3 */
11443+ unsigned int sethi, bajmpl, nop;
11444+
11445+ err = get_user(sethi, (unsigned int *)regs->pc);
11446+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11447+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11448+
11449+ if (err)
11450+ break;
11451+
11452+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11453+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11454+ nop == 0x01000000U)
11455+ {
11456+ unsigned int addr;
11457+
11458+ addr = (sethi & 0x003FFFFFU) << 10;
11459+ regs->u_regs[UREG_G1] = addr;
11460+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11461+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11462+ else
11463+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11464+ regs->pc = addr;
11465+ regs->npc = addr+4;
11466+ return 2;
11467+ }
11468+ } while (0);
11469+
11470+ do { /* PaX: unpatched PLT emulation step 1 */
11471+ unsigned int sethi, ba, nop;
11472+
11473+ err = get_user(sethi, (unsigned int *)regs->pc);
11474+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11475+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11476+
11477+ if (err)
11478+ break;
11479+
11480+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11481+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11482+ nop == 0x01000000U)
11483+ {
11484+ unsigned int addr, save, call;
11485+
11486+ if ((ba & 0xFFC00000U) == 0x30800000U)
11487+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11488+ else
11489+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11490+
11491+ err = get_user(save, (unsigned int *)addr);
11492+ err |= get_user(call, (unsigned int *)(addr+4));
11493+ err |= get_user(nop, (unsigned int *)(addr+8));
11494+ if (err)
11495+ break;
11496+
11497+#ifdef CONFIG_PAX_DLRESOLVE
11498+ if (save == 0x9DE3BFA8U &&
11499+ (call & 0xC0000000U) == 0x40000000U &&
11500+ nop == 0x01000000U)
11501+ {
11502+ struct vm_area_struct *vma;
11503+ unsigned long call_dl_resolve;
11504+
11505+ down_read(&current->mm->mmap_sem);
11506+ call_dl_resolve = current->mm->call_dl_resolve;
11507+ up_read(&current->mm->mmap_sem);
11508+ if (likely(call_dl_resolve))
11509+ goto emulate;
11510+
11511+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11512+
11513+ down_write(&current->mm->mmap_sem);
11514+ if (current->mm->call_dl_resolve) {
11515+ call_dl_resolve = current->mm->call_dl_resolve;
11516+ up_write(&current->mm->mmap_sem);
11517+ if (vma)
11518+ kmem_cache_free(vm_area_cachep, vma);
11519+ goto emulate;
11520+ }
11521+
11522+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11523+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11524+ up_write(&current->mm->mmap_sem);
11525+ if (vma)
11526+ kmem_cache_free(vm_area_cachep, vma);
11527+ return 1;
11528+ }
11529+
11530+ if (pax_insert_vma(vma, call_dl_resolve)) {
11531+ up_write(&current->mm->mmap_sem);
11532+ kmem_cache_free(vm_area_cachep, vma);
11533+ return 1;
11534+ }
11535+
11536+ current->mm->call_dl_resolve = call_dl_resolve;
11537+ up_write(&current->mm->mmap_sem);
11538+
11539+emulate:
11540+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11541+ regs->pc = call_dl_resolve;
11542+ regs->npc = addr+4;
11543+ return 3;
11544+ }
11545+#endif
11546+
11547+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11548+ if ((save & 0xFFC00000U) == 0x05000000U &&
11549+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11550+ nop == 0x01000000U)
11551+ {
11552+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11553+ regs->u_regs[UREG_G2] = addr + 4;
11554+ addr = (save & 0x003FFFFFU) << 10;
11555+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11556+ regs->pc = addr;
11557+ regs->npc = addr+4;
11558+ return 3;
11559+ }
11560+ }
11561+ } while (0);
11562+
11563+ do { /* PaX: unpatched PLT emulation step 2 */
11564+ unsigned int save, call, nop;
11565+
11566+ err = get_user(save, (unsigned int *)(regs->pc-4));
11567+ err |= get_user(call, (unsigned int *)regs->pc);
11568+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11569+ if (err)
11570+ break;
11571+
11572+ if (save == 0x9DE3BFA8U &&
11573+ (call & 0xC0000000U) == 0x40000000U &&
11574+ nop == 0x01000000U)
11575+ {
11576+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11577+
11578+ regs->u_regs[UREG_RETPC] = regs->pc;
11579+ regs->pc = dl_resolve;
11580+ regs->npc = dl_resolve+4;
11581+ return 3;
11582+ }
11583+ } while (0);
11584+#endif
11585+
11586+ return 1;
11587+}
11588+
11589+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11590+{
11591+ unsigned long i;
11592+
11593+ printk(KERN_ERR "PAX: bytes at PC: ");
11594+ for (i = 0; i < 8; i++) {
11595+ unsigned int c;
11596+ if (get_user(c, (unsigned int *)pc+i))
11597+ printk(KERN_CONT "???????? ");
11598+ else
11599+ printk(KERN_CONT "%08x ", c);
11600+ }
11601+ printk("\n");
11602+}
11603+#endif
11604+
11605 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11606 int text_fault)
11607 {
11608@@ -226,6 +500,24 @@ good_area:
11609 if (!(vma->vm_flags & VM_WRITE))
11610 goto bad_area;
11611 } else {
11612+
11613+#ifdef CONFIG_PAX_PAGEEXEC
11614+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11615+ up_read(&mm->mmap_sem);
11616+ switch (pax_handle_fetch_fault(regs)) {
11617+
11618+#ifdef CONFIG_PAX_EMUPLT
11619+ case 2:
11620+ case 3:
11621+ return;
11622+#endif
11623+
11624+ }
11625+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11626+ do_group_exit(SIGKILL);
11627+ }
11628+#endif
11629+
11630 /* Allow reads even for write-only mappings */
11631 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11632 goto bad_area;
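
The address arithmetic in pax_handle_fetch_fault() above relies on a branch-free sign extension of the jmpl instruction's 13-bit simm13 field: the OR forces every bit above bit 12 to one, the XOR flips the sign bit, and the final ADD either wraps a positive value back to its plain form or restores the all-ones prefix for a negative one. A minimal standalone sketch (helper name and test values are illustrative, not from the patch):

#include <stdio.h>
#include <stdint.h>

/* Mirror (((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U). */
static int32_t sign_extend_simm13(uint32_t insn)
{
	return (int32_t)(((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
}

int main(void)
{
	printf("%d\n", sign_extend_simm13(0x0ABC)); /* bit 12 clear: 2748 */
	printf("%d\n", sign_extend_simm13(0x1ABC)); /* bit 12 set: -1348 */
	return 0;
}
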
11633diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11634index 4798232..f76e3aa 100644
11635--- a/arch/sparc/mm/fault_64.c
11636+++ b/arch/sparc/mm/fault_64.c
11637@@ -22,6 +22,9 @@
11638 #include <linux/kdebug.h>
11639 #include <linux/percpu.h>
11640 #include <linux/context_tracking.h>
11641+#include <linux/slab.h>
11642+#include <linux/pagemap.h>
11643+#include <linux/compiler.h>
11644
11645 #include <asm/page.h>
11646 #include <asm/pgtable.h>
11647@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11648 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11649 regs->tpc);
11650 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11651- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11652+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11653 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11654 dump_stack();
11655 unhandled_fault(regs->tpc, current, regs);
11656@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11657 show_regs(regs);
11658 }
11659
11660+#ifdef CONFIG_PAX_PAGEEXEC
11661+#ifdef CONFIG_PAX_DLRESOLVE
11662+static void pax_emuplt_close(struct vm_area_struct *vma)
11663+{
11664+ vma->vm_mm->call_dl_resolve = 0UL;
11665+}
11666+
11667+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11668+{
11669+ unsigned int *kaddr;
11670+
11671+ vmf->page = alloc_page(GFP_HIGHUSER);
11672+ if (!vmf->page)
11673+ return VM_FAULT_OOM;
11674+
11675+ kaddr = kmap(vmf->page);
11676+ memset(kaddr, 0, PAGE_SIZE);
11677+ kaddr[0] = 0x9DE3BFA8U; /* save */
11678+ flush_dcache_page(vmf->page);
11679+ kunmap(vmf->page);
11680+ return VM_FAULT_MAJOR;
11681+}
11682+
11683+static const struct vm_operations_struct pax_vm_ops = {
11684+ .close = pax_emuplt_close,
11685+ .fault = pax_emuplt_fault
11686+};
11687+
11688+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11689+{
11690+ int ret;
11691+
11692+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11693+ vma->vm_mm = current->mm;
11694+ vma->vm_start = addr;
11695+ vma->vm_end = addr + PAGE_SIZE;
11696+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11697+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11698+ vma->vm_ops = &pax_vm_ops;
11699+
11700+ ret = insert_vm_struct(current->mm, vma);
11701+ if (ret)
11702+ return ret;
11703+
11704+ ++current->mm->total_vm;
11705+ return 0;
11706+}
11707+#endif
11708+
11709+/*
11710+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11711+ *
11712+ * returns 1 when task should be killed
11713+ * 2 when patched PLT trampoline was detected
11714+ * 3 when unpatched PLT trampoline was detected
11715+ */
11716+static int pax_handle_fetch_fault(struct pt_regs *regs)
11717+{
11718+
11719+#ifdef CONFIG_PAX_EMUPLT
11720+ int err;
11721+
11722+ do { /* PaX: patched PLT emulation #1 */
11723+ unsigned int sethi1, sethi2, jmpl;
11724+
11725+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11726+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11727+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11728+
11729+ if (err)
11730+ break;
11731+
11732+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11733+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11734+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11735+ {
11736+ unsigned long addr;
11737+
11738+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11739+ addr = regs->u_regs[UREG_G1];
11740+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11741+
11742+ if (test_thread_flag(TIF_32BIT))
11743+ addr &= 0xFFFFFFFFUL;
11744+
11745+ regs->tpc = addr;
11746+ regs->tnpc = addr+4;
11747+ return 2;
11748+ }
11749+ } while (0);
11750+
11751+ do { /* PaX: patched PLT emulation #2 */
11752+ unsigned int ba;
11753+
11754+ err = get_user(ba, (unsigned int *)regs->tpc);
11755+
11756+ if (err)
11757+ break;
11758+
11759+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11760+ unsigned long addr;
11761+
11762+ if ((ba & 0xFFC00000U) == 0x30800000U)
11763+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11764+ else
11765+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11766+
11767+ if (test_thread_flag(TIF_32BIT))
11768+ addr &= 0xFFFFFFFFUL;
11769+
11770+ regs->tpc = addr;
11771+ regs->tnpc = addr+4;
11772+ return 2;
11773+ }
11774+ } while (0);
11775+
11776+ do { /* PaX: patched PLT emulation #3 */
11777+ unsigned int sethi, bajmpl, nop;
11778+
11779+ err = get_user(sethi, (unsigned int *)regs->tpc);
11780+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11781+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11782+
11783+ if (err)
11784+ break;
11785+
11786+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11787+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11788+ nop == 0x01000000U)
11789+ {
11790+ unsigned long addr;
11791+
11792+ addr = (sethi & 0x003FFFFFU) << 10;
11793+ regs->u_regs[UREG_G1] = addr;
11794+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11795+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11796+ else
11797+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11798+
11799+ if (test_thread_flag(TIF_32BIT))
11800+ addr &= 0xFFFFFFFFUL;
11801+
11802+ regs->tpc = addr;
11803+ regs->tnpc = addr+4;
11804+ return 2;
11805+ }
11806+ } while (0);
11807+
11808+ do { /* PaX: patched PLT emulation #4 */
11809+ unsigned int sethi, mov1, call, mov2;
11810+
11811+ err = get_user(sethi, (unsigned int *)regs->tpc);
11812+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11813+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11814+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11815+
11816+ if (err)
11817+ break;
11818+
11819+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11820+ mov1 == 0x8210000FU &&
11821+ (call & 0xC0000000U) == 0x40000000U &&
11822+ mov2 == 0x9E100001U)
11823+ {
11824+ unsigned long addr;
11825+
11826+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11827+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11828+
11829+ if (test_thread_flag(TIF_32BIT))
11830+ addr &= 0xFFFFFFFFUL;
11831+
11832+ regs->tpc = addr;
11833+ regs->tnpc = addr+4;
11834+ return 2;
11835+ }
11836+ } while (0);
11837+
11838+ do { /* PaX: patched PLT emulation #5 */
11839+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11840+
11841+ err = get_user(sethi, (unsigned int *)regs->tpc);
11842+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11843+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11844+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11845+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11846+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11847+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11848+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11849+
11850+ if (err)
11851+ break;
11852+
11853+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11854+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11855+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11856+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11857+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11858+ sllx == 0x83287020U &&
11859+ jmpl == 0x81C04005U &&
11860+ nop == 0x01000000U)
11861+ {
11862+ unsigned long addr;
11863+
11864+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11865+ regs->u_regs[UREG_G1] <<= 32;
11866+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11867+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11868+ regs->tpc = addr;
11869+ regs->tnpc = addr+4;
11870+ return 2;
11871+ }
11872+ } while (0);
11873+
11874+ do { /* PaX: patched PLT emulation #6 */
11875+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11876+
11877+ err = get_user(sethi, (unsigned int *)regs->tpc);
11878+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11879+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11880+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11881+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11882+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11883+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11884+
11885+ if (err)
11886+ break;
11887+
11888+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11889+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11890+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11891+ sllx == 0x83287020U &&
11892+ (or & 0xFFFFE000U) == 0x8A116000U &&
11893+ jmpl == 0x81C04005U &&
11894+ nop == 0x01000000U)
11895+ {
11896+ unsigned long addr;
11897+
11898+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11899+ regs->u_regs[UREG_G1] <<= 32;
11900+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11901+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11902+ regs->tpc = addr;
11903+ regs->tnpc = addr+4;
11904+ return 2;
11905+ }
11906+ } while (0);
11907+
11908+ do { /* PaX: unpatched PLT emulation step 1 */
11909+ unsigned int sethi, ba, nop;
11910+
11911+ err = get_user(sethi, (unsigned int *)regs->tpc);
11912+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11913+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11914+
11915+ if (err)
11916+ break;
11917+
11918+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11919+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11920+ nop == 0x01000000U)
11921+ {
11922+ unsigned long addr;
11923+ unsigned int save, call;
11924+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11925+
11926+ if ((ba & 0xFFC00000U) == 0x30800000U)
11927+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11928+ else
11929+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11930+
11931+ if (test_thread_flag(TIF_32BIT))
11932+ addr &= 0xFFFFFFFFUL;
11933+
11934+ err = get_user(save, (unsigned int *)addr);
11935+ err |= get_user(call, (unsigned int *)(addr+4));
11936+ err |= get_user(nop, (unsigned int *)(addr+8));
11937+ if (err)
11938+ break;
11939+
11940+#ifdef CONFIG_PAX_DLRESOLVE
11941+ if (save == 0x9DE3BFA8U &&
11942+ (call & 0xC0000000U) == 0x40000000U &&
11943+ nop == 0x01000000U)
11944+ {
11945+ struct vm_area_struct *vma;
11946+ unsigned long call_dl_resolve;
11947+
11948+ down_read(&current->mm->mmap_sem);
11949+ call_dl_resolve = current->mm->call_dl_resolve;
11950+ up_read(&current->mm->mmap_sem);
11951+ if (likely(call_dl_resolve))
11952+ goto emulate;
11953+
11954+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11955+
11956+ down_write(&current->mm->mmap_sem);
11957+ if (current->mm->call_dl_resolve) {
11958+ call_dl_resolve = current->mm->call_dl_resolve;
11959+ up_write(&current->mm->mmap_sem);
11960+ if (vma)
11961+ kmem_cache_free(vm_area_cachep, vma);
11962+ goto emulate;
11963+ }
11964+
11965+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11966+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11967+ up_write(&current->mm->mmap_sem);
11968+ if (vma)
11969+ kmem_cache_free(vm_area_cachep, vma);
11970+ return 1;
11971+ }
11972+
11973+ if (pax_insert_vma(vma, call_dl_resolve)) {
11974+ up_write(&current->mm->mmap_sem);
11975+ kmem_cache_free(vm_area_cachep, vma);
11976+ return 1;
11977+ }
11978+
11979+ current->mm->call_dl_resolve = call_dl_resolve;
11980+ up_write(&current->mm->mmap_sem);
11981+
11982+emulate:
11983+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11984+ regs->tpc = call_dl_resolve;
11985+ regs->tnpc = addr+4;
11986+ return 3;
11987+ }
11988+#endif
11989+
11990+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11991+ if ((save & 0xFFC00000U) == 0x05000000U &&
11992+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11993+ nop == 0x01000000U)
11994+ {
11995+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11996+ regs->u_regs[UREG_G2] = addr + 4;
11997+ addr = (save & 0x003FFFFFU) << 10;
11998+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11999+
12000+ if (test_thread_flag(TIF_32BIT))
12001+ addr &= 0xFFFFFFFFUL;
12002+
12003+ regs->tpc = addr;
12004+ regs->tnpc = addr+4;
12005+ return 3;
12006+ }
12007+
12008+ /* PaX: 64-bit PLT stub */
12009+ err = get_user(sethi1, (unsigned int *)addr);
12010+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12011+ err |= get_user(or1, (unsigned int *)(addr+8));
12012+ err |= get_user(or2, (unsigned int *)(addr+12));
12013+ err |= get_user(sllx, (unsigned int *)(addr+16));
12014+ err |= get_user(add, (unsigned int *)(addr+20));
12015+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12016+ err |= get_user(nop, (unsigned int *)(addr+28));
12017+ if (err)
12018+ break;
12019+
12020+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12021+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12022+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12023+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12024+ sllx == 0x89293020U &&
12025+ add == 0x8A010005U &&
12026+ jmpl == 0x89C14000U &&
12027+ nop == 0x01000000U)
12028+ {
12029+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12030+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12031+ regs->u_regs[UREG_G4] <<= 32;
12032+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12033+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12034+ regs->u_regs[UREG_G4] = addr + 24;
12035+ addr = regs->u_regs[UREG_G5];
12036+ regs->tpc = addr;
12037+ regs->tnpc = addr+4;
12038+ return 3;
12039+ }
12040+ }
12041+ } while (0);
12042+
12043+#ifdef CONFIG_PAX_DLRESOLVE
12044+ do { /* PaX: unpatched PLT emulation step 2 */
12045+ unsigned int save, call, nop;
12046+
12047+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12048+ err |= get_user(call, (unsigned int *)regs->tpc);
12049+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12050+ if (err)
12051+ break;
12052+
12053+ if (save == 0x9DE3BFA8U &&
12054+ (call & 0xC0000000U) == 0x40000000U &&
12055+ nop == 0x01000000U)
12056+ {
12057+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12058+
12059+ if (test_thread_flag(TIF_32BIT))
12060+ dl_resolve &= 0xFFFFFFFFUL;
12061+
12062+ regs->u_regs[UREG_RETPC] = regs->tpc;
12063+ regs->tpc = dl_resolve;
12064+ regs->tnpc = dl_resolve+4;
12065+ return 3;
12066+ }
12067+ } while (0);
12068+#endif
12069+
12070+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12071+ unsigned int sethi, ba, nop;
12072+
12073+ err = get_user(sethi, (unsigned int *)regs->tpc);
12074+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12075+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12076+
12077+ if (err)
12078+ break;
12079+
12080+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12081+ (ba & 0xFFF00000U) == 0x30600000U &&
12082+ nop == 0x01000000U)
12083+ {
12084+ unsigned long addr;
12085+
12086+ addr = (sethi & 0x003FFFFFU) << 10;
12087+ regs->u_regs[UREG_G1] = addr;
12088+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12089+
12090+ if (test_thread_flag(TIF_32BIT))
12091+ addr &= 0xFFFFFFFFUL;
12092+
12093+ regs->tpc = addr;
12094+ regs->tnpc = addr+4;
12095+ return 2;
12096+ }
12097+ } while (0);
12098+
12099+#endif
12100+
12101+ return 1;
12102+}
12103+
12104+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12105+{
12106+ unsigned long i;
12107+
12108+ printk(KERN_ERR "PAX: bytes at PC: ");
12109+ for (i = 0; i < 8; i++) {
12110+ unsigned int c;
12111+ if (get_user(c, (unsigned int *)pc+i))
12112+ printk(KERN_CONT "???????? ");
12113+ else
12114+ printk(KERN_CONT "%08x ", c);
12115+ }
12116+ printk("\n");
12117+}
12118+#endif
12119+
12120 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12121 {
12122 enum ctx_state prev_state = exception_enter();
12123@@ -353,6 +816,29 @@ retry:
12124 if (!vma)
12125 goto bad_area;
12126
12127+#ifdef CONFIG_PAX_PAGEEXEC
12128+ /* PaX: detect ITLB misses on non-exec pages */
12129+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12130+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12131+ {
12132+ if (address != regs->tpc)
12133+ goto good_area;
12134+
12135+ up_read(&mm->mmap_sem);
12136+ switch (pax_handle_fetch_fault(regs)) {
12137+
12138+#ifdef CONFIG_PAX_EMUPLT
12139+ case 2:
12140+ case 3:
12141+ return;
12142+#endif
12143+
12144+ }
12145+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12146+ do_group_exit(SIGKILL);
12147+ }
12148+#endif
12149+
12150 /* Pure DTLB misses do not tell us whether the fault causing
12151 * load/store/atomic was a write or not, it only says that there
12152 * was no match. So in such a case we (carefully) read the
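
The branch cases decode the 22-bit disp22 field of a SPARC ba with the same OR/XOR/ADD idiom (bit 21 is the sign bit), scale by the 4-byte instruction size, and on sparc64 truncate the result for TIF_32BIT compat tasks. A hedged standalone rendition (branch_target is an illustrative name; compat stands in for test_thread_flag(TIF_32BIT)):

#include <stdio.h>

static unsigned long branch_target(unsigned long pc, unsigned int insn,
				   int compat)
{
	unsigned long addr = pc + ((((insn | 0xFFFFFFFFFFC00000UL)
				     ^ 0x00200000UL) + 0x00200000UL) << 2);
	if (compat)
		addr &= 0xFFFFFFFFUL;	/* 32-bit task on a 64-bit kernel */
	return addr;
}

int main(void)
{
	/* disp22 == 1: target is pc + 4; prints 0x10004 */
	printf("%#lx\n", branch_target(0x10000UL, 0x30800001U, 0));
	return 0;
}
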
12153diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12154index 4242eab..9ae6360 100644
12155--- a/arch/sparc/mm/hugetlbpage.c
12156+++ b/arch/sparc/mm/hugetlbpage.c
12157@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12158 unsigned long addr,
12159 unsigned long len,
12160 unsigned long pgoff,
12161- unsigned long flags)
12162+ unsigned long flags,
12163+ unsigned long offset)
12164 {
12165+ struct mm_struct *mm = current->mm;
12166 unsigned long task_size = TASK_SIZE;
12167 struct vm_unmapped_area_info info;
12168
12169@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12170
12171 info.flags = 0;
12172 info.length = len;
12173- info.low_limit = TASK_UNMAPPED_BASE;
12174+ info.low_limit = mm->mmap_base;
12175 info.high_limit = min(task_size, VA_EXCLUDE_START);
12176 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12177 info.align_offset = 0;
12178+ info.threadstack_offset = offset;
12179 addr = vm_unmapped_area(&info);
12180
12181 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12182 VM_BUG_ON(addr != -ENOMEM);
12183 info.low_limit = VA_EXCLUDE_END;
12184+
12185+#ifdef CONFIG_PAX_RANDMMAP
12186+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12187+ info.low_limit += mm->delta_mmap;
12188+#endif
12189+
12190 info.high_limit = task_size;
12191 addr = vm_unmapped_area(&info);
12192 }
12193@@ -55,7 +64,8 @@ static unsigned long
12194 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12195 const unsigned long len,
12196 const unsigned long pgoff,
12197- const unsigned long flags)
12198+ const unsigned long flags,
12199+ const unsigned long offset)
12200 {
12201 struct mm_struct *mm = current->mm;
12202 unsigned long addr = addr0;
12203@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12204 info.high_limit = mm->mmap_base;
12205 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12206 info.align_offset = 0;
12207+ info.threadstack_offset = offset;
12208 addr = vm_unmapped_area(&info);
12209
12210 /*
12211@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12212 VM_BUG_ON(addr != -ENOMEM);
12213 info.flags = 0;
12214 info.low_limit = TASK_UNMAPPED_BASE;
12215+
12216+#ifdef CONFIG_PAX_RANDMMAP
12217+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12218+ info.low_limit += mm->delta_mmap;
12219+#endif
12220+
12221 info.high_limit = STACK_TOP32;
12222 addr = vm_unmapped_area(&info);
12223 }
12224@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12225 struct mm_struct *mm = current->mm;
12226 struct vm_area_struct *vma;
12227 unsigned long task_size = TASK_SIZE;
12228+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12229
12230 if (test_thread_flag(TIF_32BIT))
12231 task_size = STACK_TOP32;
12232@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12233 return addr;
12234 }
12235
12236+#ifdef CONFIG_PAX_RANDMMAP
12237+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12238+#endif
12239+
12240 if (addr) {
12241 addr = ALIGN(addr, HPAGE_SIZE);
12242 vma = find_vma(mm, addr);
12243- if (task_size - len >= addr &&
12244- (!vma || addr + len <= vma->vm_start))
12245+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12246 return addr;
12247 }
12248 if (mm->get_unmapped_area == arch_get_unmapped_area)
12249 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12250- pgoff, flags);
12251+ pgoff, flags, offset);
12252 else
12253 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12254- pgoff, flags);
12255+ pgoff, flags, offset);
12256 }
12257
12258 pte_t *huge_pte_alloc(struct mm_struct *mm,
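
check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test so that the randomized thread-stack offset computed above is honoured when validating a candidate mapping. A simplified sketch, assuming offset is pure extra slack (the real grsecurity helper also accounts for the stack guard gap and growth direction):

struct vma_lite { unsigned long vm_start; };	/* stand-in type */

static int gap_ok(const struct vma_lite *vma, unsigned long addr,
		  unsigned long len, unsigned long offset)
{
	if (!vma)
		return 1;	/* nothing mapped above the candidate */
	return addr + len + offset <= vma->vm_start;
}
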
12259diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12260index 4ca0d6b..e89bca1 100644
12261--- a/arch/sparc/mm/init_64.c
12262+++ b/arch/sparc/mm/init_64.c
12263@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12264 int num_kernel_image_mappings;
12265
12266 #ifdef CONFIG_DEBUG_DCFLUSH
12267-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12268+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12269 #ifdef CONFIG_SMP
12270-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12271+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12272 #endif
12273 #endif
12274
12275@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12276 {
12277 BUG_ON(tlb_type == hypervisor);
12278 #ifdef CONFIG_DEBUG_DCFLUSH
12279- atomic_inc(&dcpage_flushes);
12280+ atomic_inc_unchecked(&dcpage_flushes);
12281 #endif
12282
12283 #ifdef DCACHE_ALIASING_POSSIBLE
12284@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12285
12286 #ifdef CONFIG_DEBUG_DCFLUSH
12287 seq_printf(m, "DCPageFlushes\t: %d\n",
12288- atomic_read(&dcpage_flushes));
12289+ atomic_read_unchecked(&dcpage_flushes));
12290 #ifdef CONFIG_SMP
12291 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12292- atomic_read(&dcpage_flushes_xcall));
12293+ atomic_read_unchecked(&dcpage_flushes_xcall));
12294 #endif /* CONFIG_SMP */
12295 #endif /* CONFIG_DEBUG_DCFLUSH */
12296 }
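
The dcpage_flushes counters are pure statistics, so they move to the _unchecked atomic types: under PAX_REFCOUNT the plain atomic ops are instrumented to trap on signed overflow, and counters that may legitimately wrap must opt out. A hedged illustration of the distinction (not the kernel's implementation; the checked variant below is a non-atomic sketch):

#include <limits.h>

typedef struct { volatile int counter; } atomic_unchecked_lite_t;

/* Wrapping increment, as atomic_inc_unchecked() allows. */
static inline void inc_unchecked(atomic_unchecked_lite_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

/* Overflow-refusing increment; PAX_REFCOUNT instead detects the
 * overflow in the increment instruction itself and kills the offender. */
static inline int inc_checked(atomic_unchecked_lite_t *v)
{
	if (__atomic_load_n(&v->counter, __ATOMIC_RELAXED) == INT_MAX)
		return -1;
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	return 0;
}
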
12297diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12298index 7cca418..53fc030 100644
12299--- a/arch/tile/Kconfig
12300+++ b/arch/tile/Kconfig
12301@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12302
12303 config KEXEC
12304 bool "kexec system call"
12305+ depends on !GRKERNSEC_KMEM
12306 ---help---
12307 kexec is a system call that implements the ability to shutdown your
12308 current kernel, and to start another kernel. It is like a reboot
12309diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12310index 7b11c5f..755a026 100644
12311--- a/arch/tile/include/asm/atomic_64.h
12312+++ b/arch/tile/include/asm/atomic_64.h
12313@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12314
12315 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12316
12317+#define atomic64_read_unchecked(v) atomic64_read(v)
12318+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12319+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12320+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12321+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12322+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12323+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12324+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12325+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12326+
12327 /* Define this to indicate that cmpxchg is an efficient operation. */
12328 #define __HAVE_ARCH_CMPXCHG
12329
12330diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12331index 6160761..00cac88 100644
12332--- a/arch/tile/include/asm/cache.h
12333+++ b/arch/tile/include/asm/cache.h
12334@@ -15,11 +15,12 @@
12335 #ifndef _ASM_TILE_CACHE_H
12336 #define _ASM_TILE_CACHE_H
12337
12338+#include <linux/const.h>
12339 #include <arch/chip.h>
12340
12341 /* bytes per L1 data cache line */
12342 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12343-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12344+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12345
12346 /* bytes per L2 cache line */
12347 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
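
_AC(1,UL) comes from include/uapi/linux/const.h: in C it pastes the UL suffix onto the constant so the shift is performed in unsigned long, while in assembly, where type suffixes are invalid syntax, it degrades to a bare 1. Paraphrased from const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* bare constant for .S files */
#else
#define __AC(X, Y)	(X##Y)		/* paste the C type suffix */
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)	/* 64UL, not int 64 */
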
12348diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12349index f41cb53..31d3ab4 100644
12350--- a/arch/tile/include/asm/uaccess.h
12351+++ b/arch/tile/include/asm/uaccess.h
12352@@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12353 const void __user *from,
12354 unsigned long n)
12355 {
12356- int sz = __compiletime_object_size(to);
12357+ size_t sz = __compiletime_object_size(to);
12358
12359- if (likely(sz == -1 || sz >= n))
12360+ if (likely(sz == (size_t)-1 || sz >= n))
12361 n = _copy_from_user(to, from, n);
12362 else
12363 copy_from_user_overflow();
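
__compiletime_object_size() yields -1 when the object size cannot be determined at compile time; keeping the result in a plain int made the later comparison against the unsigned length work only through implicit conversion. The hunk spells the sentinel out as (size_t)-1. A minimal demonstration of the fixed idiom:

#include <stddef.h>
#include <stdio.h>

static int copy_allowed(size_t sz, size_t n)
{
	/* "size unknown" or "object large enough" */
	return sz == (size_t)-1 || sz >= n;
}

int main(void)
{
	printf("%d\n", copy_allowed((size_t)-1, 16)); /* 1: size unknown */
	printf("%d\n", copy_allowed(8, 16));          /* 0: would overrun */
	return 0;
}
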
12364diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12365index 8416240..a012fb7 100644
12366--- a/arch/tile/mm/hugetlbpage.c
12367+++ b/arch/tile/mm/hugetlbpage.c
12368@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12369 info.high_limit = TASK_SIZE;
12370 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12371 info.align_offset = 0;
12372+ info.threadstack_offset = 0;
12373 return vm_unmapped_area(&info);
12374 }
12375
12376@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12377 info.high_limit = current->mm->mmap_base;
12378 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12379 info.align_offset = 0;
12380+ info.threadstack_offset = 0;
12381 addr = vm_unmapped_area(&info);
12382
12383 /*
12384diff --git a/arch/um/Makefile b/arch/um/Makefile
12385index e4b1a96..16162f8 100644
12386--- a/arch/um/Makefile
12387+++ b/arch/um/Makefile
12388@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12389 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12390 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12391
12392+ifdef CONSTIFY_PLUGIN
12393+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12394+endif
12395+
12396 #This will adjust *FLAGS accordingly to the platform.
12397 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12398
12399diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12400index 19e1bdd..3665b77 100644
12401--- a/arch/um/include/asm/cache.h
12402+++ b/arch/um/include/asm/cache.h
12403@@ -1,6 +1,7 @@
12404 #ifndef __UM_CACHE_H
12405 #define __UM_CACHE_H
12406
12407+#include <linux/const.h>
12408
12409 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12410 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12411@@ -12,6 +13,6 @@
12412 # define L1_CACHE_SHIFT 5
12413 #endif
12414
12415-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12416+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12417
12418 #endif
12419diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12420index 2e0a6b1..a64d0f5 100644
12421--- a/arch/um/include/asm/kmap_types.h
12422+++ b/arch/um/include/asm/kmap_types.h
12423@@ -8,6 +8,6 @@
12424
12425 /* No more #include "asm/arch/kmap_types.h" ! */
12426
12427-#define KM_TYPE_NR 14
12428+#define KM_TYPE_NR 15
12429
12430 #endif
12431diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12432index 71c5d13..4c7b9f1 100644
12433--- a/arch/um/include/asm/page.h
12434+++ b/arch/um/include/asm/page.h
12435@@ -14,6 +14,9 @@
12436 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12437 #define PAGE_MASK (~(PAGE_SIZE-1))
12438
12439+#define ktla_ktva(addr) (addr)
12440+#define ktva_ktla(addr) (addr)
12441+
12442 #ifndef __ASSEMBLY__
12443
12444 struct page;
12445diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12446index 2b4274e..754fe06 100644
12447--- a/arch/um/include/asm/pgtable-3level.h
12448+++ b/arch/um/include/asm/pgtable-3level.h
12449@@ -58,6 +58,7 @@
12450 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12451 #define pud_populate(mm, pud, pmd) \
12452 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12453+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12454
12455 #ifdef CONFIG_64BIT
12456 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12457diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12458index f17bca8..48adb87 100644
12459--- a/arch/um/kernel/process.c
12460+++ b/arch/um/kernel/process.c
12461@@ -356,22 +356,6 @@ int singlestepping(void * t)
12462 return 2;
12463 }
12464
12465-/*
12466- * Only x86 and x86_64 have an arch_align_stack().
12467- * All other arches have "#define arch_align_stack(x) (x)"
12468- * in their asm/exec.h
12469- * As this is included in UML from asm-um/system-generic.h,
12470- * we can use it to behave as the subarch does.
12471- */
12472-#ifndef arch_align_stack
12473-unsigned long arch_align_stack(unsigned long sp)
12474-{
12475- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12476- sp -= get_random_int() % 8192;
12477- return sp & ~0xf;
12478-}
12479-#endif
12480-
12481 unsigned long get_wchan(struct task_struct *p)
12482 {
12483 unsigned long stack_page, sp, ip;
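
The deleted UML fallback shaved a random 0-8191 bytes off the stack pointer and 16-byte-aligned the result; grsecurity removes it, presumably because PaX's own userland stack randomization (RANDUSTACK) supersedes this weaker scheme. For reference, a standalone rendition of what the removed helper computed, with the random source stubbed out:

/* rnd stands in for get_random_int(); illustration only. */
static unsigned long align_stack(unsigned long sp, unsigned int rnd)
{
	sp -= rnd % 8192;	/* up to 8 KiB of random slack */
	return sp & ~0xfUL;	/* keep 16-byte alignment */
}
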
12484diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12485index ad8f795..2c7eec6 100644
12486--- a/arch/unicore32/include/asm/cache.h
12487+++ b/arch/unicore32/include/asm/cache.h
12488@@ -12,8 +12,10 @@
12489 #ifndef __UNICORE_CACHE_H__
12490 #define __UNICORE_CACHE_H__
12491
12492-#define L1_CACHE_SHIFT (5)
12493-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12494+#include <linux/const.h>
12495+
12496+#define L1_CACHE_SHIFT 5
12497+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12498
12499 /*
12500 * Memory returned by kmalloc() may be used for DMA, so we must make
12501diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12502index b7d31ca..9481ec5 100644
12503--- a/arch/x86/Kconfig
12504+++ b/arch/x86/Kconfig
12505@@ -132,7 +132,7 @@ config X86
12506 select RTC_LIB
12507 select HAVE_DEBUG_STACKOVERFLOW
12508 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12509- select HAVE_CC_STACKPROTECTOR
12510+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12511 select GENERIC_CPU_AUTOPROBE
12512 select HAVE_ARCH_AUDITSYSCALL
12513 select ARCH_SUPPORTS_ATOMIC_RMW
12514@@ -266,7 +266,7 @@ config X86_HT
12515
12516 config X86_32_LAZY_GS
12517 def_bool y
12518- depends on X86_32 && !CC_STACKPROTECTOR
12519+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12520
12521 config ARCH_HWEIGHT_CFLAGS
12522 string
12523@@ -632,6 +632,7 @@ config SCHED_OMIT_FRAME_POINTER
12524
12525 menuconfig HYPERVISOR_GUEST
12526 bool "Linux guest support"
12527+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12528 ---help---
12529 Say Y here to enable options for running Linux under various hyper-
12530 visors. This option enables basic hypervisor detection and platform
12531@@ -1013,6 +1014,7 @@ config VM86
12532
12533 config X86_16BIT
12534 bool "Enable support for 16-bit segments" if EXPERT
12535+ depends on !GRKERNSEC
12536 default y
12537 ---help---
12538 This option is required by programs like Wine to run 16-bit
12539@@ -1186,6 +1188,7 @@ choice
12540
12541 config NOHIGHMEM
12542 bool "off"
12543+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12544 ---help---
12545 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12546 However, the address space of 32-bit x86 processors is only 4
12547@@ -1222,6 +1225,7 @@ config NOHIGHMEM
12548
12549 config HIGHMEM4G
12550 bool "4GB"
12551+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12552 ---help---
12553 Select this if you have a 32-bit processor and between 1 and 4
12554 gigabytes of physical RAM.
12555@@ -1274,7 +1278,7 @@ config PAGE_OFFSET
12556 hex
12557 default 0xB0000000 if VMSPLIT_3G_OPT
12558 default 0x80000000 if VMSPLIT_2G
12559- default 0x78000000 if VMSPLIT_2G_OPT
12560+ default 0x70000000 if VMSPLIT_2G_OPT
12561 default 0x40000000 if VMSPLIT_1G
12562 default 0xC0000000
12563 depends on X86_32
12564@@ -1715,6 +1719,7 @@ source kernel/Kconfig.hz
12565
12566 config KEXEC
12567 bool "kexec system call"
12568+ depends on !GRKERNSEC_KMEM
12569 ---help---
12570 kexec is a system call that implements the ability to shutdown your
12571 current kernel, and to start another kernel. It is like a reboot
12572@@ -1900,7 +1905,9 @@ config X86_NEED_RELOCS
12573
12574 config PHYSICAL_ALIGN
12575 hex "Alignment value to which kernel should be aligned"
12576- default "0x200000"
12577+ default "0x1000000"
12578+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12579+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12580 range 0x2000 0x1000000 if X86_32
12581 range 0x200000 0x1000000 if X86_64
12582 ---help---
12583@@ -1983,6 +1990,7 @@ config COMPAT_VDSO
12584 def_bool n
12585 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12586 depends on X86_32 || IA32_EMULATION
12587+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12588 ---help---
12589 Certain buggy versions of glibc will crash if they are
12590 presented with a 32-bit vDSO that is not mapped at the address
12591diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12592index 6983314..54ad7e8 100644
12593--- a/arch/x86/Kconfig.cpu
12594+++ b/arch/x86/Kconfig.cpu
12595@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12596
12597 config X86_F00F_BUG
12598 def_bool y
12599- depends on M586MMX || M586TSC || M586 || M486
12600+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12601
12602 config X86_INVD_BUG
12603 def_bool y
12604@@ -327,7 +327,7 @@ config X86_INVD_BUG
12605
12606 config X86_ALIGNMENT_16
12607 def_bool y
12608- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12609+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12610
12611 config X86_INTEL_USERCOPY
12612 def_bool y
12613@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12614 # generates cmov.
12615 config X86_CMOV
12616 def_bool y
12617- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12618+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12619
12620 config X86_MINIMUM_CPU_FAMILY
12621 int
12622diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12623index 20028da..88d5946 100644
12624--- a/arch/x86/Kconfig.debug
12625+++ b/arch/x86/Kconfig.debug
12626@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12627 config DEBUG_RODATA
12628 bool "Write protect kernel read-only data structures"
12629 default y
12630- depends on DEBUG_KERNEL
12631+ depends on DEBUG_KERNEL && BROKEN
12632 ---help---
12633 Mark the kernel read-only data as write-protected in the pagetables,
12634 in order to catch accidental (and incorrect) writes to such const
12635@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12636
12637 config DEBUG_SET_MODULE_RONX
12638 bool "Set loadable kernel module data as NX and text as RO"
12639- depends on MODULES
12640+ depends on MODULES && BROKEN
12641 ---help---
12642 This option helps catch unintended modifications to loadable
12643 kernel module's text and read-only data. It also prevents execution
12644diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12645index 5ba2d9c..41e5bb6 100644
12646--- a/arch/x86/Makefile
12647+++ b/arch/x86/Makefile
12648@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12649 # CPU-specific tuning. Anything which can be shared with UML should go here.
12650 include $(srctree)/arch/x86/Makefile_32.cpu
12651 KBUILD_CFLAGS += $(cflags-y)
12652-
12653- # temporary until string.h is fixed
12654- KBUILD_CFLAGS += -ffreestanding
12655 else
12656 BITS := 64
12657 UTS_MACHINE := x86_64
12658@@ -107,6 +104,9 @@ else
12659 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12660 endif
12661
12662+# temporary until string.h is fixed
12663+KBUILD_CFLAGS += -ffreestanding
12664+
12665 # Make sure compiler does not have buggy stack-protector support.
12666 ifdef CONFIG_CC_STACKPROTECTOR
12667 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12668@@ -181,6 +181,7 @@ archheaders:
12669 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12670
12671 archprepare:
12672+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12673 ifeq ($(CONFIG_KEXEC_FILE),y)
12674 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12675 endif
12676@@ -264,3 +265,9 @@ define archhelp
12677 echo ' FDARGS="..." arguments for the booted kernel'
12678 echo ' FDINITRD=file initrd for the booted kernel'
12679 endef
12680+
12681+define OLD_LD
12682+
12683+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12684+*** Please upgrade your binutils to 2.18 or newer
12685+endef
12686diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12687index 57bbf2f..b100fce 100644
12688--- a/arch/x86/boot/Makefile
12689+++ b/arch/x86/boot/Makefile
12690@@ -58,6 +58,9 @@ clean-files += cpustr.h
12691 # ---------------------------------------------------------------------------
12692
12693 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12694+ifdef CONSTIFY_PLUGIN
12695+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12696+endif
12697 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12698 GCOV_PROFILE := n
12699
12700diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12701index 878e4b9..20537ab 100644
12702--- a/arch/x86/boot/bitops.h
12703+++ b/arch/x86/boot/bitops.h
12704@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12705 u8 v;
12706 const u32 *p = (const u32 *)addr;
12707
12708- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12709+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12710 return v;
12711 }
12712
12713@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12714
12715 static inline void set_bit(int nr, void *addr)
12716 {
12717- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12718+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12719 }
12720
12721 #endif /* BOOT_BITOPS_H */
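
The volatile qualifier matters on these asm statements because they have outputs yet depend on state the compiler cannot see (BIOS- or device-modified memory, MSRs): without it, GCC is free to CSE or hoist an asm whose visible inputs look unchanged. The pattern, using the same btl/setc sequence as the hunk:

/* Re-evaluate the bit on every call: volatile pins the asm in place
 * even when its C-visible inputs appear loop-invariant. */
static inline int test_bit_fresh(int nr, const void *addr)
{
	unsigned char v;
	const unsigned int *p = addr;

	asm volatile("btl %2,%1; setc %0"
		     : "=qm" (v)
		     : "m" (*p), "Ir" (nr));
	return v;
}
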
12722diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12723index bd49ec6..94c7f58 100644
12724--- a/arch/x86/boot/boot.h
12725+++ b/arch/x86/boot/boot.h
12726@@ -84,7 +84,7 @@ static inline void io_delay(void)
12727 static inline u16 ds(void)
12728 {
12729 u16 seg;
12730- asm("movw %%ds,%0" : "=rm" (seg));
12731+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12732 return seg;
12733 }
12734
12735diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12736index 0a291cd..9686efc 100644
12737--- a/arch/x86/boot/compressed/Makefile
12738+++ b/arch/x86/boot/compressed/Makefile
12739@@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
12740 KBUILD_CFLAGS += -mno-mmx -mno-sse
12741 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12742 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12743+ifdef CONSTIFY_PLUGIN
12744+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12745+endif
12746
12747 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12748 GCOV_PROFILE := n
12749diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12750index a53440e..c3dbf1e 100644
12751--- a/arch/x86/boot/compressed/efi_stub_32.S
12752+++ b/arch/x86/boot/compressed/efi_stub_32.S
12753@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12754 * parameter 2, ..., param n. To make things easy, we save the return
12755 * address of efi_call_phys in a global variable.
12756 */
12757- popl %ecx
12758- movl %ecx, saved_return_addr(%edx)
12759- /* get the function pointer into ECX*/
12760- popl %ecx
12761- movl %ecx, efi_rt_function_ptr(%edx)
12762+ popl saved_return_addr(%edx)
12763+ popl efi_rt_function_ptr(%edx)
12764
12765 /*
12766 * 3. Call the physical function.
12767 */
12768- call *%ecx
12769+ call *efi_rt_function_ptr(%edx)
12770
12771 /*
12772 * 4. Balance the stack. And because EAX contain the return value,
12773@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12774 1: popl %edx
12775 subl $1b, %edx
12776
12777- movl efi_rt_function_ptr(%edx), %ecx
12778- pushl %ecx
12779+ pushl efi_rt_function_ptr(%edx)
12780
12781 /*
12782 * 10. Push the saved return address onto the stack and return.
12783 */
12784- movl saved_return_addr(%edx), %ecx
12785- pushl %ecx
12786- ret
12787+ jmpl *saved_return_addr(%edx)
12788 ENDPROC(efi_call_phys)
12789 .previous
12790
12791diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12792index 630384a..278e788 100644
12793--- a/arch/x86/boot/compressed/efi_thunk_64.S
12794+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12795@@ -189,8 +189,8 @@ efi_gdt64:
12796 .long 0 /* Filled out by user */
12797 .word 0
12798 .quad 0x0000000000000000 /* NULL descriptor */
12799- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12800- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12801+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12802+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12803 .quad 0x0080890000000000 /* TS descriptor */
12804 .quad 0x0000000000000000 /* TS continued */
12805 efi_gdt64_end:
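
The GDT edits flip exactly one bit per descriptor: 0x9a -> 0x9b and 0x92 -> 0x93 pre-set the segment type's accessed bit (bit 40), so the CPU never has to write the descriptor when the selector is first loaded; that write would otherwise fault once PaX maps the GDT read-only. Verifiable in a few lines:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t old_cs = 0x00af9a000000ffffULL;	/* __KERNEL_CS before */
	uint64_t new_cs = 0x00af9b000000ffffULL;	/* __KERNEL_CS after  */

	/* prints 0x10000000000: only bit 40, the accessed bit, differs */
	printf("%#llx\n", (unsigned long long)(old_cs ^ new_cs));
	return 0;
}
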
12806diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12807index 1d7fbbc..36ecd58 100644
12808--- a/arch/x86/boot/compressed/head_32.S
12809+++ b/arch/x86/boot/compressed/head_32.S
12810@@ -140,10 +140,10 @@ preferred_addr:
12811 addl %eax, %ebx
12812 notl %eax
12813 andl %eax, %ebx
12814- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12815+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12816 jge 1f
12817 #endif
12818- movl $LOAD_PHYSICAL_ADDR, %ebx
12819+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12820 1:
12821
12822 /* Target address to relocate to for decompression */
12823diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12824index 6b1766c..ad465c9 100644
12825--- a/arch/x86/boot/compressed/head_64.S
12826+++ b/arch/x86/boot/compressed/head_64.S
12827@@ -94,10 +94,10 @@ ENTRY(startup_32)
12828 addl %eax, %ebx
12829 notl %eax
12830 andl %eax, %ebx
12831- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12832+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12833 jge 1f
12834 #endif
12835- movl $LOAD_PHYSICAL_ADDR, %ebx
12836+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12837 1:
12838
12839 /* Target address to relocate to for decompression */
12840@@ -322,10 +322,10 @@ preferred_addr:
12841 addq %rax, %rbp
12842 notq %rax
12843 andq %rax, %rbp
12844- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12845+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12846 jge 1f
12847 #endif
12848- movq $LOAD_PHYSICAL_ADDR, %rbp
12849+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12850 1:
12851
12852 /* Target address to relocate to for decompression */
12853@@ -434,8 +434,8 @@ gdt:
12854 .long gdt
12855 .word 0
12856 .quad 0x0000000000000000 /* NULL descriptor */
12857- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12858- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12859+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12860+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12861 .quad 0x0080890000000000 /* TS descriptor */
12862 .quad 0x0000000000000000 /* TS continued */
12863 gdt_end:
12864diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12865index a950864..c710239 100644
12866--- a/arch/x86/boot/compressed/misc.c
12867+++ b/arch/x86/boot/compressed/misc.c
12868@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12869 * Calculate the delta between where vmlinux was linked to load
12870 * and where it was actually loaded.
12871 */
12872- delta = min_addr - LOAD_PHYSICAL_ADDR;
12873+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12874 if (!delta) {
12875 debug_putstr("No relocation needed... ");
12876 return;
12877@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12878 Elf32_Ehdr ehdr;
12879 Elf32_Phdr *phdrs, *phdr;
12880 #endif
12881- void *dest;
12882+ void *dest, *prev;
12883 int i;
12884
12885 memcpy(&ehdr, output, sizeof(ehdr));
12886@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12887 case PT_LOAD:
12888 #ifdef CONFIG_RELOCATABLE
12889 dest = output;
12890- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12891+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12892 #else
12893 dest = (void *)(phdr->p_paddr);
12894 #endif
12895 memcpy(dest,
12896 output + phdr->p_offset,
12897 phdr->p_filesz);
12898+ if (i)
12899+ memset(prev, 0xff, dest - prev);
12900+ prev = dest + phdr->p_filesz;
12901 break;
12902 default: /* Ignore other PT_* */ break;
12903 }
12904@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12905 error("Destination address too large");
12906 #endif
12907 #ifndef CONFIG_RELOCATABLE
12908- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12909+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12910 error("Wrong destination address");
12911 #endif
12912
12913diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12914index 1fd7d57..0f7d096 100644
12915--- a/arch/x86/boot/cpucheck.c
12916+++ b/arch/x86/boot/cpucheck.c
12917@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12918 u32 ecx = MSR_K7_HWCR;
12919 u32 eax, edx;
12920
12921- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12922+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12923 eax &= ~(1 << 15);
12924- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12925+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12926
12927 get_cpuflags(); /* Make sure it really did something */
12928 err = check_cpuflags();
12929@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12930 u32 ecx = MSR_VIA_FCR;
12931 u32 eax, edx;
12932
12933- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12934+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12935 eax |= (1<<1)|(1<<7);
12936- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12937+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12938
12939 set_bit(X86_FEATURE_CX8, cpu.flags);
12940 err = check_cpuflags();
12941@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12942 u32 eax, edx;
12943 u32 level = 1;
12944
12945- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12946- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12947- asm("cpuid"
12948+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12949+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12950+ asm volatile("cpuid"
12951 : "+a" (level), "=d" (cpu.flags[0])
12952 : : "ecx", "ebx");
12953- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12954+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12955
12956 err = check_cpuflags();
12957 } else if (err == 0x01 &&
12958diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12959index 16ef025..91e033b 100644
12960--- a/arch/x86/boot/header.S
12961+++ b/arch/x86/boot/header.S
12962@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12963 # single linked list of
12964 # struct setup_data
12965
12966-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12967+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12968
12969 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12970+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12971+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12972+#else
12973 #define VO_INIT_SIZE (VO__end - VO__text)
12974+#endif
12975 #if ZO_INIT_SIZE > VO_INIT_SIZE
12976 #define INIT_SIZE ZO_INIT_SIZE
12977 #else
12978diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12979index db75d07..8e6d0af 100644
12980--- a/arch/x86/boot/memory.c
12981+++ b/arch/x86/boot/memory.c
12982@@ -19,7 +19,7 @@
12983
12984 static int detect_memory_e820(void)
12985 {
12986- int count = 0;
12987+ unsigned int count = 0;
12988 struct biosregs ireg, oreg;
12989 struct e820entry *desc = boot_params.e820_map;
12990 static struct e820entry buf; /* static so it is zeroed */
12991diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12992index ba3e100..6501b8f 100644
12993--- a/arch/x86/boot/video-vesa.c
12994+++ b/arch/x86/boot/video-vesa.c
12995@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
12996
12997 boot_params.screen_info.vesapm_seg = oreg.es;
12998 boot_params.screen_info.vesapm_off = oreg.di;
12999+ boot_params.screen_info.vesapm_size = oreg.cx;
13000 }
13001
13002 /*
13003diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13004index 43eda28..5ab5fdb 100644
13005--- a/arch/x86/boot/video.c
13006+++ b/arch/x86/boot/video.c
13007@@ -96,7 +96,7 @@ static void store_mode_params(void)
13008 static unsigned int get_entry(void)
13009 {
13010 char entry_buf[4];
13011- int i, len = 0;
13012+ unsigned int i, len = 0;
13013 int key;
13014 unsigned int v;
13015
13016diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13017index 9105655..41779c1 100644
13018--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13019+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13020@@ -8,6 +8,8 @@
13021 * including this sentence is retained in full.
13022 */
13023
13024+#include <asm/alternative-asm.h>
13025+
13026 .extern crypto_ft_tab
13027 .extern crypto_it_tab
13028 .extern crypto_fl_tab
13029@@ -70,6 +72,8 @@
13030 je B192; \
13031 leaq 32(r9),r9;
13032
13033+#define ret pax_force_retaddr; ret
13034+
13035 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13036 movq r1,r2; \
13037 movq r3,r4; \
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 6bd2c6c..368c93e 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -31,6 +31,7 @@

 #include <linux/linkage.h>
 #include <asm/inst.h>
+#include <asm/alternative-asm.h>

 /*
 * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -217,7 +218,7 @@ enc: .octa 0x2
 * num_initial_blocks = b mod 4
 * encrypt the initial num_initial_blocks blocks and apply ghash on
 * the ciphertext
-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
 * are clobbered
 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
 */
@@ -227,8 +228,8 @@ enc: .octa 0x2
 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
 MOVADQ SHUF_MASK(%rip), %xmm14
 mov arg7, %r10 # %r10 = AAD
- mov arg8, %r12 # %r12 = aadLen
- mov %r12, %r11
+ mov arg8, %r15 # %r15 = aadLen
+ mov %r15, %r11
 pxor %xmm\i, %xmm\i

_get_AAD_loop\num_initial_blocks\operation:
@@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
 psrldq $4, %xmm\i
 pxor \TMP1, %xmm\i
 add $4, %r10
- sub $4, %r12
+ sub $4, %r15
 jne _get_AAD_loop\num_initial_blocks\operation

 cmp $16, %r11
 je _get_AAD_loop2_done\num_initial_blocks\operation

- mov $16, %r12
+ mov $16, %r15
_get_AAD_loop2\num_initial_blocks\operation:
 psrldq $4, %xmm\i
- sub $4, %r12
- cmp %r11, %r12
+ sub $4, %r15
+ cmp %r11, %r15
 jne _get_AAD_loop2\num_initial_blocks\operation

_get_AAD_loop2_done\num_initial_blocks\operation:
@@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
 * num_initial_blocks = b mod 4
 * encrypt the initial num_initial_blocks blocks and apply ghash on
 * the ciphertext
-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
 * are clobbered
 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
 */
@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
 MOVADQ SHUF_MASK(%rip), %xmm14
 mov arg7, %r10 # %r10 = AAD
- mov arg8, %r12 # %r12 = aadLen
- mov %r12, %r11
+ mov arg8, %r15 # %r15 = aadLen
+ mov %r15, %r11
 pxor %xmm\i, %xmm\i
_get_AAD_loop\num_initial_blocks\operation:
 movd (%r10), \TMP1
@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
 psrldq $4, %xmm\i
 pxor \TMP1, %xmm\i
 add $4, %r10
- sub $4, %r12
+ sub $4, %r15
 jne _get_AAD_loop\num_initial_blocks\operation
 cmp $16, %r11
 je _get_AAD_loop2_done\num_initial_blocks\operation
- mov $16, %r12
+ mov $16, %r15
_get_AAD_loop2\num_initial_blocks\operation:
 psrldq $4, %xmm\i
- sub $4, %r12
- cmp %r11, %r12
+ sub $4, %r15
+ cmp %r11, %r15
 jne _get_AAD_loop2\num_initial_blocks\operation
_get_AAD_loop2_done\num_initial_blocks\operation:
 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
@@ -1280,7 +1281,7 @@ _esb_loop_\@:
 *
 *****************************************************************************/
 ENTRY(aesni_gcm_dec)
- push %r12
+ push %r15
 push %r13
 push %r14
 mov %rsp, %r14
@@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
 */
 sub $VARIABLE_OFFSET, %rsp
 and $~63, %rsp # align rsp to 64 bytes
- mov %arg6, %r12
- movdqu (%r12), %xmm13 # %xmm13 = HashKey
+ mov %arg6, %r15
+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
 movdqa SHUF_MASK(%rip), %xmm2
 PSHUFB_XMM %xmm2, %xmm13

@@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
- mov %r13, %r12
- and $(3<<4), %r12
+ mov %r13, %r15
+ and $(3<<4), %r15
 jz _initial_num_blocks_is_0_decrypt
- cmp $(2<<4), %r12
+ cmp $(2<<4), %r15
 jb _initial_num_blocks_is_1_decrypt
 je _initial_num_blocks_is_2_decrypt
_initial_num_blocks_is_3_decrypt:
@@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
 sub $16, %r11
 add %r13, %r11
 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12
+ lea SHIFT_MASK+16(%rip), %r15
+ sub %r13, %r15
 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
 # (%r13 is the number of bytes in plaintext mod 16)
- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes

 movdqa %xmm1, %xmm2
 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
 pand %xmm1, %xmm2
@@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
 sub $1, %r13
 jne _less_than_8_bytes_left_decrypt
_multiple_of_16_bytes_decrypt:
- mov arg8, %r12 # %r13 = aadLen (number of bytes)
- shl $3, %r12 # convert into number of bits
- movd %r12d, %xmm15 # len(A) in %xmm15
+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
+ shl $3, %r15 # convert into number of bits
+ movd %r15d, %xmm15 # len(A) in %xmm15
 shl $3, %arg4 # len(C) in bits (*128)
 MOVQ_R64_XMM %arg4, %xmm1
 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
@@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
 mov %r14, %rsp
 pop %r14
 pop %r13
- pop %r12
+ pop %r15
+ pax_force_retaddr
 ret
 ENDPROC(aesni_gcm_dec)

@@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
 * poly = x^128 + x^127 + x^126 + x^121 + 1
 ***************************************************************************/
 ENTRY(aesni_gcm_enc)
- push %r12
+ push %r15
 push %r13
 push %r14
 mov %rsp, %r14
@@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
 #
 sub $VARIABLE_OFFSET, %rsp
 and $~63, %rsp
- mov %arg6, %r12
- movdqu (%r12), %xmm13
+ mov %arg6, %r15
+ movdqu (%r15), %xmm13
 movdqa SHUF_MASK(%rip), %xmm2
 PSHUFB_XMM %xmm2, %xmm13

@@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
 movdqa %xmm13, HashKey(%rsp)
 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
 and $-16, %r13
- mov %r13, %r12
+ mov %r13, %r15

 # Encrypt first few blocks

- and $(3<<4), %r12
+ and $(3<<4), %r15
 jz _initial_num_blocks_is_0_encrypt
- cmp $(2<<4), %r12
+ cmp $(2<<4), %r15
 jb _initial_num_blocks_is_1_encrypt
 je _initial_num_blocks_is_2_encrypt
_initial_num_blocks_is_3_encrypt:
@@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
 sub $16, %r11
 add %r13, %r11
 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12
+ lea SHIFT_MASK+16(%rip), %r15
+ sub %r13, %r15
 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
 # (%r13 is the number of bytes in plaintext mod 16)
- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
 movdqa SHUF_MASK(%rip), %xmm10
@@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
 sub $1, %r13
 jne _less_than_8_bytes_left_encrypt
_multiple_of_16_bytes_encrypt:
- mov arg8, %r12 # %r12 = addLen (number of bytes)
- shl $3, %r12
- movd %r12d, %xmm15 # len(A) in %xmm15
+ mov arg8, %r15 # %r15 = addLen (number of bytes)
+ shl $3, %r15
+ movd %r15d, %xmm15 # len(A) in %xmm15
 shl $3, %arg4 # len(C) in bits (*128)
 MOVQ_R64_XMM %arg4, %xmm1
 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
@@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
 mov %r14, %rsp
 pop %r14
 pop %r13
- pop %r12
+ pop %r15
+ pax_force_retaddr
 ret
 ENDPROC(aesni_gcm_enc)

@@ -1733,6 +1736,7 @@ _key_expansion_256a:
 pxor %xmm1, %xmm0
 movaps %xmm0, (TKEYP)
 add $0x10, TKEYP
+ pax_force_retaddr
 ret
 ENDPROC(_key_expansion_128)
 ENDPROC(_key_expansion_256a)
@@ -1759,6 +1763,7 @@ _key_expansion_192a:
 shufps $0b01001110, %xmm2, %xmm1
 movaps %xmm1, 0x10(TKEYP)
 add $0x20, TKEYP
+ pax_force_retaddr
 ret
 ENDPROC(_key_expansion_192a)

@@ -1779,6 +1784,7 @@ _key_expansion_192b:

 movaps %xmm0, (TKEYP)
 add $0x10, TKEYP
+ pax_force_retaddr
 ret
 ENDPROC(_key_expansion_192b)

@@ -1792,6 +1798,7 @@ _key_expansion_256b:
 pxor %xmm1, %xmm2
 movaps %xmm2, (TKEYP)
 add $0x10, TKEYP
+ pax_force_retaddr
 ret
 ENDPROC(_key_expansion_256b)

@@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
 #ifndef __x86_64__
 popl KEYP
 #endif
+ pax_force_retaddr
 ret
 ENDPROC(aesni_set_key)

@@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
 popl KLEN
 popl KEYP
 #endif
+ pax_force_retaddr
 ret
 ENDPROC(aesni_enc)

@@ -1985,6 +1994,7 @@ _aesni_enc1:
 AESENC KEY STATE
 movaps 0x70(TKEYP), KEY
 AESENCLAST KEY STATE
+ pax_force_retaddr
 ret
 ENDPROC(_aesni_enc1)

@@ -2094,6 +2104,7 @@ _aesni_enc4:
 AESENCLAST KEY STATE2
 AESENCLAST KEY STATE3
 AESENCLAST KEY STATE4
+ pax_force_retaddr
 ret
 ENDPROC(_aesni_enc4)

@@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
 popl KLEN
 popl KEYP
 #endif
+ pax_force_retaddr
 ret
 ENDPROC(aesni_dec)

@@ -2175,6 +2187,7 @@ _aesni_dec1:
 AESDEC KEY STATE
 movaps 0x70(TKEYP), KEY
 AESDECLAST KEY STATE
+ pax_force_retaddr
 ret
 ENDPROC(_aesni_dec1)

@@ -2284,6 +2297,7 @@ _aesni_dec4:
 AESDECLAST KEY STATE2
 AESDECLAST KEY STATE3
 AESDECLAST KEY STATE4
+ pax_force_retaddr
 ret
 ENDPROC(_aesni_dec4)

@@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
 popl KEYP
 popl LEN
 #endif
+ pax_force_retaddr
 ret
 ENDPROC(aesni_ecb_enc)

@@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
 popl KEYP
 popl LEN
 #endif
+ pax_force_retaddr
 ret
 ENDPROC(aesni_ecb_dec)

@@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
 popl LEN
 popl IVP
 #endif
+ pax_force_retaddr
 ret
 ENDPROC(aesni_cbc_enc)

@@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
 popl LEN
 popl IVP
 #endif
+ pax_force_retaddr
 ret
 ENDPROC(aesni_cbc_dec)

@@ -2561,6 +2579,7 @@ _aesni_inc_init:
 mov $1, TCTR_LOW
 MOVQ_R64_XMM TCTR_LOW INC
 MOVQ_R64_XMM CTR TCTR_LOW
+ pax_force_retaddr
 ret
 ENDPROC(_aesni_inc_init)

@@ -2590,6 +2609,7 @@ _aesni_inc:
 .Linc_low:
 movaps CTR, IV
 PSHUFB_XMM BSWAP_MASK IV
+ pax_force_retaddr
 ret
 ENDPROC(_aesni_inc)

@@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
 .Lctr_enc_ret:
 movups IV, (IVP)
 .Lctr_enc_just_ret:
+ pax_force_retaddr
 ret
 ENDPROC(aesni_ctr_enc)

@@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
 pxor INC, STATE4
 movdqu STATE4, 0x70(OUTP)

+ pax_force_retaddr
 ret
 ENDPROC(aesni_xts_crypt8)

diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
index 246c670..466e2d6 100644
--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
@@ -21,6 +21,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 .file "blowfish-x86_64-asm.S"
 .text
@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
 jnz .L__enc_xor;

 write_block();
+ pax_force_retaddr
 ret;
.L__enc_xor:
 xor_block();
+ pax_force_retaddr
 ret;
 ENDPROC(__blowfish_enc_blk)

@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)

 movq %r11, %rbp;

+ pax_force_retaddr
 ret;
 ENDPROC(blowfish_dec_blk)

@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)

 popq %rbx;
 popq %rbp;
+ pax_force_retaddr
 ret;

.L__enc_xor4:
@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)

 popq %rbx;
 popq %rbp;
+ pax_force_retaddr
 ret;
 ENDPROC(__blowfish_enc_blk_4way)

@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
 popq %rbx;
 popq %rbp;

+ pax_force_retaddr
 ret;
 ENDPROC(blowfish_dec_blk_4way)
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index ce71f92..1dce7ec 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -16,6 +16,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 #define CAMELLIA_TABLE_BYTE_LEN 272

@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
 %rcx, (%r9));
+ pax_force_retaddr
 ret;
 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)

@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
 %rax, (%r9));
+ pax_force_retaddr
 ret;
 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)

@@ -780,6 +783,7 @@ __camellia_enc_blk16:
 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));

+ pax_force_retaddr
 ret;

 .align 8
@@ -865,6 +869,7 @@ __camellia_dec_blk16:
 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));

+ pax_force_retaddr
 ret;

 .align 8
@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 %xmm8, %rsi);

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_ecb_enc_16way)

@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 %xmm8, %rsi);

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_ecb_dec_16way)

@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 %xmm8, %rsi);

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_cbc_dec_16way)

@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 %xmm8, %rsi);

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_ctr_16way)

@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 %xmm8, %rsi);

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_xts_crypt_16way)

diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 0e0b886..5a3123c 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -11,6 +11,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 #define CAMELLIA_TABLE_BYTE_LEN 272

@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
 %rcx, (%r9));
+ pax_force_retaddr
 ret;
 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)

@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
 %rax, (%r9));
+ pax_force_retaddr
 ret;
 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)

@@ -820,6 +823,7 @@ __camellia_enc_blk32:
 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));

+ pax_force_retaddr
 ret;

 .align 8
@@ -905,6 +909,7 @@ __camellia_dec_blk32:
 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));

+ pax_force_retaddr
 ret;

 .align 8
@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_ecb_enc_32way)

@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_ecb_dec_32way)

@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_cbc_dec_32way)

@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_ctr_32way)

@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(camellia_xts_crypt_32way)

diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
index 310319c..db3d7b5 100644
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
@@ -21,6 +21,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 .file "camellia-x86_64-asm_64.S"
 .text
@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
 enc_outunpack(mov, RT1);

 movq RRBP, %rbp;
+ pax_force_retaddr
 ret;

.L__enc_xor:
 enc_outunpack(xor, RT1);

 movq RRBP, %rbp;
+ pax_force_retaddr
 ret;
 ENDPROC(__camellia_enc_blk)

@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
 dec_outunpack();

 movq RRBP, %rbp;
+ pax_force_retaddr
 ret;
 ENDPROC(camellia_dec_blk)

@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)

 movq RRBP, %rbp;
 popq %rbx;
+ pax_force_retaddr
 ret;

.L__enc2_xor:
@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)

 movq RRBP, %rbp;
 popq %rbx;
+ pax_force_retaddr
 ret;
 ENDPROC(__camellia_enc_blk_2way)

@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)

 movq RRBP, %rbp;
 movq RXOR, %rbx;
+ pax_force_retaddr
 ret;
 ENDPROC(camellia_dec_blk_2way)
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index c35fd5d..2d8c7db 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 .file "cast5-avx-x86_64-asm_64.S"

@@ -281,6 +282,7 @@ __cast5_enc_blk16:
 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

+ pax_force_retaddr
 ret;
 ENDPROC(__cast5_enc_blk16)

@@ -352,6 +354,7 @@ __cast5_dec_blk16:
 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

+ pax_force_retaddr
 ret;

.L__skip_dec:
@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
 vmovdqu RR4, (6*4*4)(%r11);
 vmovdqu RL4, (7*4*4)(%r11);

+ pax_force_retaddr
 ret;
 ENDPROC(cast5_ecb_enc_16way)

@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
 vmovdqu RR4, (6*4*4)(%r11);
 vmovdqu RL4, (7*4*4)(%r11);

+ pax_force_retaddr
 ret;
 ENDPROC(cast5_ecb_dec_16way)

@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
 * %rdx: src
 */

- pushq %r12;
+ pushq %r14;

 movq %rsi, %r11;
- movq %rdx, %r12;
+ movq %rdx, %r14;

 vmovdqu (0*16)(%rdx), RL1;
 vmovdqu (1*16)(%rdx), RR1;
@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
 call __cast5_dec_blk16;

 /* xor with src */
- vmovq (%r12), RX;
+ vmovq (%r14), RX;
 vpshufd $0x4f, RX, RX;
 vpxor RX, RR1, RR1;
- vpxor 0*16+8(%r12), RL1, RL1;
- vpxor 1*16+8(%r12), RR2, RR2;
- vpxor 2*16+8(%r12), RL2, RL2;
- vpxor 3*16+8(%r12), RR3, RR3;
- vpxor 4*16+8(%r12), RL3, RL3;
- vpxor 5*16+8(%r12), RR4, RR4;
- vpxor 6*16+8(%r12), RL4, RL4;
+ vpxor 0*16+8(%r14), RL1, RL1;
+ vpxor 1*16+8(%r14), RR2, RR2;
+ vpxor 2*16+8(%r14), RL2, RL2;
+ vpxor 3*16+8(%r14), RR3, RR3;
+ vpxor 4*16+8(%r14), RL3, RL3;
+ vpxor 5*16+8(%r14), RR4, RR4;
+ vpxor 6*16+8(%r14), RL4, RL4;

 vmovdqu RR1, (0*16)(%r11);
 vmovdqu RL1, (1*16)(%r11);
@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
 vmovdqu RR4, (6*16)(%r11);
 vmovdqu RL4, (7*16)(%r11);

- popq %r12;
+ popq %r14;

+ pax_force_retaddr
 ret;
 ENDPROC(cast5_cbc_dec_16way)

@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
 * %rcx: iv (big endian, 64bit)
 */

- pushq %r12;
+ pushq %r14;

 movq %rsi, %r11;
- movq %rdx, %r12;
+ movq %rdx, %r14;

 vpcmpeqd RTMP, RTMP, RTMP;
 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
 call __cast5_enc_blk16;

 /* dst = src ^ iv */
- vpxor (0*16)(%r12), RR1, RR1;
- vpxor (1*16)(%r12), RL1, RL1;
- vpxor (2*16)(%r12), RR2, RR2;
- vpxor (3*16)(%r12), RL2, RL2;
- vpxor (4*16)(%r12), RR3, RR3;
- vpxor (5*16)(%r12), RL3, RL3;
- vpxor (6*16)(%r12), RR4, RR4;
- vpxor (7*16)(%r12), RL4, RL4;
+ vpxor (0*16)(%r14), RR1, RR1;
+ vpxor (1*16)(%r14), RL1, RL1;
+ vpxor (2*16)(%r14), RR2, RR2;
+ vpxor (3*16)(%r14), RL2, RL2;
+ vpxor (4*16)(%r14), RR3, RR3;
+ vpxor (5*16)(%r14), RL3, RL3;
+ vpxor (6*16)(%r14), RR4, RR4;
+ vpxor (7*16)(%r14), RL4, RL4;
 vmovdqu RR1, (0*16)(%r11);
 vmovdqu RL1, (1*16)(%r11);
 vmovdqu RR2, (2*16)(%r11);
@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
 vmovdqu RR4, (6*16)(%r11);
 vmovdqu RL4, (7*16)(%r11);

- popq %r12;
+ popq %r14;

+ pax_force_retaddr
 ret;
 ENDPROC(cast5_ctr_16way)
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index e3531f8..e123f35 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"

 .file "cast6-avx-x86_64-asm_64.S"
@@ -295,6 +296,7 @@ __cast6_enc_blk8:
 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

+ pax_force_retaddr
 ret;
 ENDPROC(__cast6_enc_blk8)

@@ -340,6 +342,7 @@ __cast6_dec_blk8:
 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

+ pax_force_retaddr
 ret;
 ENDPROC(__cast6_dec_blk8)

@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)

 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(cast6_ecb_enc_8way)

@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)

 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(cast6_ecb_dec_8way)

@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
 * %rdx: src
 */

- pushq %r12;
+ pushq %r14;

 movq %rsi, %r11;
- movq %rdx, %r12;
+ movq %rdx, %r14;

 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

 call __cast6_dec_blk8;

- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

- popq %r12;
+ popq %r14;

+ pax_force_retaddr
 ret;
 ENDPROC(cast6_cbc_dec_8way)

@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
 * %rcx: iv (little endian, 128bit)
 */

- pushq %r12;
+ pushq %r14;

 movq %rsi, %r11;
- movq %rdx, %r12;
+ movq %rdx, %r14;

 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
 RD2, RX, RKR, RKM);

 call __cast6_enc_blk8;

- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

- popq %r12;
+ popq %r14;

+ pax_force_retaddr
 ret;
 ENDPROC(cast6_ctr_8way)

@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
 /* dst <= regs xor IVs(in dst) */
 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(cast6_xts_enc_8way)

@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
 /* dst <= regs xor IVs(in dst) */
 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(cast6_xts_dec_8way)
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 26d49eb..c0a8c84 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@

 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction

@@ -309,6 +310,7 @@ do_return:
 popq %rsi
 popq %rdi
 popq %rbx
+ pax_force_retaddr
 ret

 ################################################################
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 5d1e007..098cb4f 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -18,6 +18,7 @@

 #include <linux/linkage.h>
 #include <asm/inst.h>
+#include <asm/alternative-asm.h>

 .data

@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
 psrlq $1, T2
 pxor T2, T1
 pxor T1, DATA
+ pax_force_retaddr
 ret
 ENDPROC(__clmul_gf128mul_ble)

@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
 call __clmul_gf128mul_ble
 PSHUFB_XMM BSWAP DATA
 movups DATA, (%rdi)
+ pax_force_retaddr
 ret
 ENDPROC(clmul_ghash_mul)

@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
 PSHUFB_XMM BSWAP DATA
 movups DATA, (%rdi)
.Lupdate_just_ret:
+ pax_force_retaddr
 ret
 ENDPROC(clmul_ghash_update)
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
index 9279e0b..c4b3d2c 100644
--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

# enter salsa20_encrypt_bytes
 ENTRY(salsa20_encrypt_bytes)
@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
 add %r11,%rsp
 mov %rdi,%rax
 mov %rsi,%rdx
+ pax_force_retaddr
 ret
# bytesatleast65:
._bytesatleast65:
@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
 add %r11,%rsp
 mov %rdi,%rax
 mov %rsi,%rdx
+ pax_force_retaddr
 ret
 ENDPROC(salsa20_keysetup)

@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
 add %r11,%rsp
 mov %rdi,%rax
 mov %rsi,%rdx
+ pax_force_retaddr
 ret
 ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 2f202f4..d9164d6 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"

 .file "serpent-avx-x86_64-asm_64.S"
@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);

+ pax_force_retaddr
 ret;
 ENDPROC(__serpent_enc_blk8_avx)

@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);

+ pax_force_retaddr
 ret;
 ENDPROC(__serpent_dec_blk8_avx)

@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)

 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_ecb_enc_8way_avx)

@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)

 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_ecb_dec_8way_avx)

@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)

 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_cbc_dec_8way_avx)

@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)

 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_ctr_8way_avx)

@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
 /* dst <= regs xor IVs(in dst) */
 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_xts_enc_8way_avx)

@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
 /* dst <= regs xor IVs(in dst) */
 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_xts_dec_8way_avx)
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index b222085..abd483c 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -15,6 +15,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx2.S"

 .file "serpent-avx2-asm_64.S"
@@ -610,6 +611,7 @@ __serpent_enc_blk16:
 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);

+ pax_force_retaddr
 ret;
 ENDPROC(__serpent_enc_blk16)

@@ -664,6 +666,7 @@ __serpent_dec_blk16:
 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);

+ pax_force_retaddr
 ret;
 ENDPROC(__serpent_dec_blk16)

@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_ecb_enc_16way)

@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_ecb_dec_16way)

@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_cbc_dec_16way)

@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_ctr_16way)

@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_xts_enc_16way)

@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)

 vzeroupper;

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_xts_dec_16way)
diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
index acc066c..1559cc4 100644
--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
@@ -25,6 +25,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 .file "serpent-sse2-x86_64-asm_64.S"
 .text
@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);

+ pax_force_retaddr
 ret;

.L__enc_xor8:
 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);

+ pax_force_retaddr
 ret;
 ENDPROC(__serpent_enc_blk_8way)

@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);

+ pax_force_retaddr
 ret;
 ENDPROC(serpent_dec_blk_8way)
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index a410950..9dfe7ad 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -29,6 +29,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 #define CTX %rdi // arg1
 #define BUF %rsi // arg2
@@ -75,9 +76,9 @@

 push %rbx
 push %rbp
- push %r12
+ push %r14

- mov %rsp, %r12
+ mov %rsp, %r14
 sub $64, %rsp # allocate workspace
 and $~15, %rsp # align stack

@@ -99,11 +100,12 @@
 xor %rax, %rax
 rep stosq

- mov %r12, %rsp # deallocate workspace
+ mov %r14, %rsp # deallocate workspace

- pop %r12
+ pop %r14
 pop %rbp
 pop %rbx
+ pax_force_retaddr
 ret

 ENDPROC(\name)
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 642f156..51a513c 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -49,6 +49,7 @@

 #ifdef CONFIG_AS_AVX
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 ## assume buffers not aligned
 #define VMOVDQ vmovdqu
@@ -460,6 +461,7 @@ done_hash:
 popq %r13
 popq %rbp
 popq %rbx
+ pax_force_retaddr
 ret
 ENDPROC(sha256_transform_avx)

diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 9e86944..3795e6a 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -50,6 +50,7 @@

 #ifdef CONFIG_AS_AVX2
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 ## assume buffers not aligned
 #define VMOVDQ vmovdqu
@@ -720,6 +721,7 @@ done_hash:
 popq %r12
 popq %rbp
 popq %rbx
+ pax_force_retaddr
 ret
 ENDPROC(sha256_transform_rorx)

diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index f833b74..8c62a9e 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -47,6 +47,7 @@
 ########################################################################

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 ## assume buffers not aligned
 #define MOVDQ movdqu
@@ -471,6 +472,7 @@ done_hash:
 popq %rbp
 popq %rbx

+ pax_force_retaddr
 ret
 ENDPROC(sha256_transform_ssse3)

diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 974dde9..a823ff9 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -49,6 +49,7 @@

 #ifdef CONFIG_AS_AVX
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 .text

@@ -364,6 +365,7 @@ updateblock:
 mov frame_RSPSAVE(%rsp), %rsp

nowork:
+ pax_force_retaddr
 ret
 ENDPROC(sha512_transform_avx)

diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index 568b961..ed20c37 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -51,6 +51,7 @@

 #ifdef CONFIG_AS_AVX2
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 .text

@@ -678,6 +679,7 @@ done_hash:

 # Restore Stack Pointer
 mov frame_RSPSAVE(%rsp), %rsp
+ pax_force_retaddr
 ret
 ENDPROC(sha512_transform_rorx)

diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index fb56855..6edd768 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -48,6 +48,7 @@
 ########################################################################

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 .text

@@ -363,6 +364,7 @@ updateblock:
 mov frame_RSPSAVE(%rsp), %rsp

nowork:
+ pax_force_retaddr
 ret
 ENDPROC(sha512_transform_ssse3)

diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index 0505813..b067311 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"

 .file "twofish-avx-x86_64-asm_64.S"
@@ -284,6 +285,7 @@ __twofish_enc_blk8:
 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);

+ pax_force_retaddr
 ret;
 ENDPROC(__twofish_enc_blk8)

@@ -324,6 +326,7 @@ __twofish_dec_blk8:
 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);

+ pax_force_retaddr
 ret;
 ENDPROC(__twofish_dec_blk8)

@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)

 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

+ pax_force_retaddr
 ret;
 ENDPROC(twofish_ecb_enc_8way)

@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)

 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(twofish_ecb_dec_8way)

@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
 * %rdx: src
 */

- pushq %r12;
+ pushq %r14;

 movq %rsi, %r11;
- movq %rdx, %r12;
+ movq %rdx, %r14;

 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

 call __twofish_dec_blk8;

- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

- popq %r12;
+ popq %r14;

+ pax_force_retaddr
 ret;
 ENDPROC(twofish_cbc_dec_8way)

@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
 * %rcx: iv (little endian, 128bit)
 */

- pushq %r12;
+ pushq %r14;

 movq %rsi, %r11;
- movq %rdx, %r12;
+ movq %rdx, %r14;

 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
 RD2, RX0, RX1, RY0);

 call __twofish_enc_blk8;

- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

- popq %r12;
+ popq %r14;

+ pax_force_retaddr
 ret;
 ENDPROC(twofish_ctr_8way)

@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
 /* dst <= regs xor IVs(in dst) */
 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

+ pax_force_retaddr
 ret;
 ENDPROC(twofish_xts_enc_8way)

@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
 /* dst <= regs xor IVs(in dst) */
 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

+ pax_force_retaddr
 ret;
 ENDPROC(twofish_xts_dec_8way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index 1c3b7ce..02f578d 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -21,6 +21,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>

 .file "twofish-x86_64-asm-3way.S"
 .text
@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
 popq %r13;
 popq %r14;
 popq %r15;
+ pax_force_retaddr
 ret;

.L__enc_xor3:
@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
 popq %r13;
 popq %r14;
 popq %r15;
+ pax_force_retaddr
 ret;
 ENDPROC(__twofish_enc_blk_3way)

@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
 popq %r13;
 popq %r14;
 popq %r15;
+ pax_force_retaddr
 ret;
 ENDPROC(twofish_dec_blk_3way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
index a039d21..524b8b2 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -22,6 +22,7 @@

 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/alternative-asm.h>

 #define a_offset 0
 #define b_offset 4
@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)

 popq R1
 movq $1,%rax
+ pax_force_retaddr
 ret
 ENDPROC(twofish_enc_blk)

@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)

 popq R1
 movq $1,%rax
+ pax_force_retaddr
 ret
 ENDPROC(twofish_dec_blk)
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index ae6aad1..719d6d9 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
 unsigned long dump_start, dump_size;
 struct user32 dump;

+ memset(&dump, 0, sizeof(dump));
+
 fs = get_fs();
 set_fs(KERNEL_DS);
 has_dumped = 1;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index d0165c9..0d5639b 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
 if (__get_user(set.sig[0], &frame->sc.oldmask)
 || (_COMPAT_NSIG_WORDS > 1
 && __copy_from_user((((char *) &set.sig) + 4),
- &frame->extramask,
+ frame->extramask,
 sizeof(frame->extramask))))
 goto badframe;

@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 sp -= frame_size;
 /* Align the stack pointer according to the i386 ABI,
 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
- sp = ((sp + 4) & -16ul) - 4;
+ sp = ((sp - 12) & -16ul) - 4;
 return (void __user *) sp;
 }

@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
 } else {
 /* Return stub is in 32bit vsyscall page */
 if (current->mm->context.vdso)
- restorer = current->mm->context.vdso +
- selected_vdso32->sym___kernel_sigreturn;
+ restorer = (void __force_user *)(current->mm->context.vdso +
+ selected_vdso32->sym___kernel_sigreturn);
 else
- restorer = &frame->retcode;
+ restorer = frame->retcode;
 }

 put_user_try {
@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
 * These are actually not used anymore, but left because some
 * gdb versions depend on them as a marker.
 */
- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
 } put_user_catch(err);

 if (err)
@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
 0xb8,
 __NR_ia32_rt_sigreturn,
 0x80cd,
- 0,
+ 0
 };

 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,

 if (ksig->ka.sa.sa_flags & SA_RESTORER)
 restorer = ksig->ka.sa.sa_restorer;
+ else if (current->mm->context.vdso)
+ /* Return stub is in 32bit vsyscall page */
+ restorer = (void __force_user *)(current->mm->context.vdso +
+ selected_vdso32->sym___kernel_rt_sigreturn);
 else
- restorer = current->mm->context.vdso +
- selected_vdso32->sym___kernel_rt_sigreturn;
+ restorer = frame->retcode;
 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

 /*
 * Not actually used anymore, but left because some gdb
 * versions need it.
 */
- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
 } put_user_catch(err);

 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 156ebca..9591cf0 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -15,8 +15,10 @@
 #include <asm/irqflags.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/pgtable.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
+#include <asm/alternative-asm.h>

 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
 #include <linux/elf-em.h>
@@ -62,12 +64,12 @@
 */
 .macro LOAD_ARGS32 offset, _r9=0
 .if \_r9
- movl \offset+16(%rsp),%r9d
+ movl \offset+R9(%rsp),%r9d
 .endif
- movl \offset+40(%rsp),%ecx
- movl \offset+48(%rsp),%edx
- movl \offset+56(%rsp),%esi
- movl \offset+64(%rsp),%edi
+ movl \offset+RCX(%rsp),%ecx
+ movl \offset+RDX(%rsp),%edx
+ movl \offset+RSI(%rsp),%esi
+ movl \offset+RDI(%rsp),%edi
 movl %eax,%eax /* zero extension */
 .endm

@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
 ENDPROC(native_irq_enable_sysexit)
 #endif

+ .macro pax_enter_kernel_user
+ pax_set_fptr_mask
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ call pax_enter_kernel_user
+#endif
+ .endm
+
+ .macro pax_exit_kernel_user
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+ pushq %rax
+ pushq %r11
+ call pax_randomize_kstack
+ popq %r11
+ popq %rax
+#endif
+ .endm
+
+ .macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+ call pax_erase_kstack
+#endif
+ .endm
+
 /*
 * 32bit SYSENTER instruction entry.
 *
@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
 CFI_REGISTER rsp,rbp
 SWAPGS_UNSAFE_STACK
 movq PER_CPU_VAR(kernel_stack), %rsp
- addq $(KERNEL_STACK_OFFSET),%rsp
- /*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs, here we enable it straight after entry:
- */
- ENABLE_INTERRUPTS(CLBR_NONE)
 movl %ebp,%ebp /* zero extension */
 pushq_cfi $__USER32_DS
 /*CFI_REL_OFFSET ss,0*/
@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
 CFI_REL_OFFSET rsp,0
 pushfq_cfi
 /*CFI_REL_OFFSET rflags,0*/
- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
- CFI_REGISTER rip,r10
+ orl $X86_EFLAGS_IF,(%rsp)
+ GET_THREAD_INFO(%r11)
+ movl TI_sysenter_return(%r11), %r11d
+ CFI_REGISTER rip,r11
 pushq_cfi $__USER32_CS
 /*CFI_REL_OFFSET cs,0*/
 movl %eax, %eax
- pushq_cfi %r10
+ pushq_cfi %r11
 CFI_REL_OFFSET rip,0
 pushq_cfi %rax
 cld
 SAVE_ARGS 0,1,0
+ pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+ pax_erase_kstack
+#endif
+
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs, here we enable it straight after entry:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
 /* no need to do an access_ok check here because rbp has been
 32bit zero extended */
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ addq pax_user_shadow_base,%rbp
+ ASM_PAX_OPEN_USERLAND
+#endif
+
 ASM_STAC
1: movl (%rbp),%ebp
 _ASM_EXTABLE(1b,ia32_badarg)
 ASM_CLAC

+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ ASM_PAX_CLOSE_USERLAND
+#endif
+
 /*
 * Sysenter doesn't filter flags, so we need to clear NT
 * ourselves. To save a few cycles, we can check whether
@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
 jnz sysenter_fix_flags
sysenter_flags_fixed:

- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ GET_THREAD_INFO(%r11)
+ orl $TS_COMPAT,TI_status(%r11)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
 CFI_REMEMBER_STATE
 jnz sysenter_tracesys
 cmpq $(IA32_NR_syscalls-1),%rax
@@ -172,14 +218,17 @@ sysenter_do_call:
sysenter_dispatch:
 call *ia32_sys_call_table(,%rax,8)
 movq %rax,RAX-ARGOFFSET(%rsp)
+ GET_THREAD_INFO(%r11)
 DISABLE_INTERRUPTS(CLBR_NONE)
 TRACE_IRQS_OFF
- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
 jnz sysexit_audit
sysexit_from_sys_call:
- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ pax_exit_kernel_user
+ pax_erase_kstack
+ andl $~TS_COMPAT,TI_status(%r11)
 /* clear IF, that popfq doesn't enable interrupts early */
- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
+ andl $~X86_EFLAGS_IF,EFLAGS-ARGOFFSET(%rsp)
 movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
 CFI_REGISTER rip,rdx
 RESTORE_ARGS 0,24,0,0,0,0
@@ -205,6 +254,9 @@ sysexit_from_sys_call:
 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
 movl %eax,%edi /* 1st arg: syscall number */
 call __audit_syscall_entry
+
+ pax_erase_kstack
+
 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
 cmpq $(IA32_NR_syscalls-1),%rax
 ja ia32_badsys
@@ -216,7 +268,7 @@ sysexit_from_sys_call:
 .endm

 .macro auditsys_exit exit
- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
 jnz ia32_ret_from_sys_call
 TRACE_IRQS_ON
 ENABLE_INTERRUPTS(CLBR_NONE)
@@ -227,11 +279,12 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
 movzbl %al,%edi /* zero-extend that into %edi */
 call __audit_syscall_exit
+ GET_THREAD_INFO(%r11)
 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
 DISABLE_INTERRUPTS(CLBR_NONE)
 TRACE_IRQS_OFF
- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl %edi,TI_flags(%r11)
 jz \exit
 CLEAR_RREGS -ARGOFFSET
 jmp int_with_check
@@ -253,7 +306,7 @@ sysenter_fix_flags:

sysenter_tracesys:
 #ifdef CONFIG_AUDITSYSCALL
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
 jz sysenter_auditsys
 #endif
 SAVE_REST
@@ -265,6 +318,9 @@ sysenter_tracesys:
 RESTORE_REST
 cmpq $(IA32_NR_syscalls-1),%rax
 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
 jmp sysenter_do_call
 CFI_ENDPROC
ENDPROC(ia32_sysenter_target)
14878@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14879 ENTRY(ia32_cstar_target)
14880 CFI_STARTPROC32 simple
14881 CFI_SIGNAL_FRAME
14882- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14883+ CFI_DEF_CFA rsp,0
14884 CFI_REGISTER rip,rcx
14885 /*CFI_REGISTER rflags,r11*/
14886 SWAPGS_UNSAFE_STACK
14887 movl %esp,%r8d
14888 CFI_REGISTER rsp,r8
14889 movq PER_CPU_VAR(kernel_stack),%rsp
14890+ SAVE_ARGS 8*6,0,0
14891+ pax_enter_kernel_user
14892+
14893+#ifdef CONFIG_PAX_RANDKSTACK
14894+ pax_erase_kstack
14895+#endif
14896+
14897 /*
14898 * No need to follow this irqs on/off section: the syscall
14899 * disabled irqs and here we enable it straight after entry:
14900 */
14901 ENABLE_INTERRUPTS(CLBR_NONE)
14902- SAVE_ARGS 8,0,0
14903 movl %eax,%eax /* zero extension */
14904 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14905 movq %rcx,RIP-ARGOFFSET(%rsp)
14906@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14907 /* no need to do an access_ok check here because r8 has been
14908 32bit zero extended */
14909 /* hardware stack frame is complete now */
14910+
14911+#ifdef CONFIG_PAX_MEMORY_UDEREF
14912+ ASM_PAX_OPEN_USERLAND
14913+ movq pax_user_shadow_base,%r8
14914+ addq RSP-ARGOFFSET(%rsp),%r8
14915+#endif
14916+
14917 ASM_STAC
14918 1: movl (%r8),%r9d
14919 _ASM_EXTABLE(1b,ia32_badarg)
14920 ASM_CLAC
14921- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14922- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14923+
14924+#ifdef CONFIG_PAX_MEMORY_UDEREF
14925+ ASM_PAX_CLOSE_USERLAND
14926+#endif
14927+
14928+ GET_THREAD_INFO(%r11)
14929+ orl $TS_COMPAT,TI_status(%r11)
14930+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14931 CFI_REMEMBER_STATE
14932 jnz cstar_tracesys
14933 cmpq $IA32_NR_syscalls-1,%rax
14934@@ -335,13 +410,16 @@ cstar_do_call:
14935 cstar_dispatch:
14936 call *ia32_sys_call_table(,%rax,8)
14937 movq %rax,RAX-ARGOFFSET(%rsp)
14938+ GET_THREAD_INFO(%r11)
14939 DISABLE_INTERRUPTS(CLBR_NONE)
14940 TRACE_IRQS_OFF
14941- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14942+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14943 jnz sysretl_audit
14944 sysretl_from_sys_call:
14945- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14946- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14947+ pax_exit_kernel_user
14948+ pax_erase_kstack
14949+ andl $~TS_COMPAT,TI_status(%r11)
14950+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14951 movl RIP-ARGOFFSET(%rsp),%ecx
14952 CFI_REGISTER rip,rcx
14953 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14954@@ -368,7 +446,7 @@ sysretl_audit:
14955
14956 cstar_tracesys:
14957 #ifdef CONFIG_AUDITSYSCALL
14958- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14959+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14960 jz cstar_auditsys
14961 #endif
14962 xchgl %r9d,%ebp
14963@@ -382,11 +460,19 @@ cstar_tracesys:
14964 xchgl %ebp,%r9d
14965 cmpq $(IA32_NR_syscalls-1),%rax
14966 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14967+
14968+ pax_erase_kstack
14969+
14970 jmp cstar_do_call
14971 END(ia32_cstar_target)
14972
14973 ia32_badarg:
14974 ASM_CLAC
14975+
14976+#ifdef CONFIG_PAX_MEMORY_UDEREF
14977+ ASM_PAX_CLOSE_USERLAND
14978+#endif
14979+
14980 movq $-EFAULT,%rax
14981 jmp ia32_sysret
14982 CFI_ENDPROC
14983@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14984 CFI_REL_OFFSET rip,RIP-RIP
14985 PARAVIRT_ADJUST_EXCEPTION_FRAME
14986 SWAPGS
14987- /*
14988- * No need to follow this irqs on/off section: the syscall
14989- * disabled irqs and here we enable it straight after entry:
14990- */
14991- ENABLE_INTERRUPTS(CLBR_NONE)
14992 movl %eax,%eax
14993 pushq_cfi %rax
14994 cld
14995 /* note the registers are not zero extended to the sf.
14996 this could be a problem. */
14997 SAVE_ARGS 0,1,0
14998- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14999- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15000+ pax_enter_kernel_user
15001+
15002+#ifdef CONFIG_PAX_RANDKSTACK
15003+ pax_erase_kstack
15004+#endif
15005+
15006+ /*
15007+ * No need to follow this irqs on/off section: the syscall
15008+ * disabled irqs and here we enable it straight after entry:
15009+ */
15010+ ENABLE_INTERRUPTS(CLBR_NONE)
15011+ GET_THREAD_INFO(%r11)
15012+ orl $TS_COMPAT,TI_status(%r11)
15013+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15014 jnz ia32_tracesys
15015 cmpq $(IA32_NR_syscalls-1),%rax
15016 ja ia32_badsys
15017@@ -458,6 +551,9 @@ ia32_tracesys:
15018 RESTORE_REST
15019 cmpq $(IA32_NR_syscalls-1),%rax
15020 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15021+
15022+ pax_erase_kstack
15023+
15024 jmp ia32_do_call
15025 END(ia32_syscall)
15026
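The UDEREF hunks above never dereference the saved user %rsp directly: under an ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND window they rebase it by pax_user_shadow_base, a shifted alias mapping of userland. A minimal userspace sketch of that aliasing idea; the memfd, the 4 KiB size, and all names here are illustrative stand-ins, not the kernel mechanism:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    /* one "physical" page ... */
    int fd = memfd_create("udref_demo", 0);
    if (fd < 0 || ftruncate(fd, 4096) < 0)
        return 1;
    /* ... aliased at two virtual addresses: the "user" view and a
     * shifted "kernel shadow" view of the same memory */
    char *user   = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    char *shadow = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (user == MAP_FAILED || shadow == MAP_FAILED)
        return 1;
    ptrdiff_t shadow_base = shadow - user; /* plays the pax_user_shadow_base role */

    strcpy(user, "written via the user view");
    /* the "kernel" rebases the user pointer instead of touching it raw,
     * just as the hunk adds pax_user_shadow_base to the saved user %rsp */
    printf("read via shadow view: %s\n", user + shadow_base);
    return 0;
}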
15027diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15028index 8e0ceec..af13504 100644
15029--- a/arch/x86/ia32/sys_ia32.c
15030+++ b/arch/x86/ia32/sys_ia32.c
15031@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15032 */
15033 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15034 {
15035- typeof(ubuf->st_uid) uid = 0;
15036- typeof(ubuf->st_gid) gid = 0;
15037+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15038+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15039 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15040 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15041 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
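The cp_stat64 change swaps typeof(ubuf->st_uid) for typeof(((struct stat64 *)0)->st_uid), naming the member's type without going through the __user-qualified pointer (likely so the checker plugins never see even an unevaluated access via ubuf). The operand of typeof is not evaluated, so the null-pointer cast is a standard GCC idiom; a small demo, where struct point is a made-up example:

/* compile with: gcc -o typeof_demo typeof_demo.c */
#include <stdio.h>

struct point { short x; long y; };

int main(void)
{
    typeof(((struct point *)0)->x) a = 0; /* a has type short */
    typeof(((struct point *)0)->y) b = 0; /* b has type long  */
    printf("sizeof a = %zu, sizeof b = %zu\n", sizeof a, sizeof b);
    return 0;
}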
15042diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15043index 372231c..51b537d 100644
15044--- a/arch/x86/include/asm/alternative-asm.h
15045+++ b/arch/x86/include/asm/alternative-asm.h
15046@@ -18,6 +18,45 @@
15047 .endm
15048 #endif
15049
15050+#ifdef KERNEXEC_PLUGIN
15051+ .macro pax_force_retaddr_bts rip=0
15052+ btsq $63,\rip(%rsp)
15053+ .endm
15054+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15055+ .macro pax_force_retaddr rip=0, reload=0
15056+ btsq $63,\rip(%rsp)
15057+ .endm
15058+ .macro pax_force_fptr ptr
15059+ btsq $63,\ptr
15060+ .endm
15061+ .macro pax_set_fptr_mask
15062+ .endm
15063+#endif
15064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15065+ .macro pax_force_retaddr rip=0, reload=0
15066+ .if \reload
15067+ pax_set_fptr_mask
15068+ .endif
15069+ orq %r12,\rip(%rsp)
15070+ .endm
15071+ .macro pax_force_fptr ptr
15072+ orq %r12,\ptr
15073+ .endm
15074+ .macro pax_set_fptr_mask
15075+ movabs $0x8000000000000000,%r12
15076+ .endm
15077+#endif
15078+#else
15079+ .macro pax_force_retaddr rip=0, reload=0
15080+ .endm
15081+ .macro pax_force_fptr ptr
15082+ .endm
15083+ .macro pax_force_retaddr_bts rip=0
15084+ .endm
15085+ .macro pax_set_fptr_mask
15086+ .endm
15087+#endif
15088+
15089 .macro altinstruction_entry orig alt feature orig_len alt_len
15090 .long \orig - .
15091 .long \alt - .
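pax_force_retaddr, added above, tags saved return addresses by setting bit 63: btsq under the BTS method, or orq with the 0x8000000000000000 mask that pax_set_fptr_mask keeps in %r12 under the OR method. A kernel address already has bit 63 set, so the tag is a no-op there, while a userland return address becomes non-canonical and faults if ever jumped to. An arithmetic-only illustration; the sample addresses are made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t kernel_ret = 0xffffffff81000000ULL; /* bit 63 already set   */
    uint64_t user_ret   = 0x0000000000400000ULL; /* typical userland text */
    uint64_t mask = 1ULL << 63;                  /* what %r12 holds in the OR method */

    /* tagging a kernel address changes nothing; tagging a userland
     * address yields a non-canonical pointer */
    printf("kernel: %#llx -> %#llx\n",
           (unsigned long long)kernel_ret,
           (unsigned long long)(kernel_ret | mask));
    printf("user:   %#llx -> %#llx (non-canonical)\n",
           (unsigned long long)user_ret,
           (unsigned long long)(user_ret | mask));
    return 0;
}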
15092diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15093index 473bdbe..b1e3377 100644
15094--- a/arch/x86/include/asm/alternative.h
15095+++ b/arch/x86/include/asm/alternative.h
15096@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15097 ".pushsection .discard,\"aw\",@progbits\n" \
15098 DISCARD_ENTRY(1) \
15099 ".popsection\n" \
15100- ".pushsection .altinstr_replacement, \"ax\"\n" \
15101+ ".pushsection .altinstr_replacement, \"a\"\n" \
15102 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15103 ".popsection"
15104
15105@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15106 DISCARD_ENTRY(1) \
15107 DISCARD_ENTRY(2) \
15108 ".popsection\n" \
15109- ".pushsection .altinstr_replacement, \"ax\"\n" \
15110+ ".pushsection .altinstr_replacement, \"a\"\n" \
15111 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15112 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15113 ".popsection"
15114diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15115index efc3b22..85c4f3a 100644
15116--- a/arch/x86/include/asm/apic.h
15117+++ b/arch/x86/include/asm/apic.h
15118@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15119
15120 #ifdef CONFIG_X86_LOCAL_APIC
15121
15122-extern unsigned int apic_verbosity;
15123+extern int apic_verbosity;
15124 extern int local_apic_timer_c2_ok;
15125
15126 extern int disable_apic;
15127diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15128index 20370c6..a2eb9b0 100644
15129--- a/arch/x86/include/asm/apm.h
15130+++ b/arch/x86/include/asm/apm.h
15131@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15132 __asm__ __volatile__(APM_DO_ZERO_SEGS
15133 "pushl %%edi\n\t"
15134 "pushl %%ebp\n\t"
15135- "lcall *%%cs:apm_bios_entry\n\t"
15136+ "lcall *%%ss:apm_bios_entry\n\t"
15137 "setc %%al\n\t"
15138 "popl %%ebp\n\t"
15139 "popl %%edi\n\t"
15140@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15141 __asm__ __volatile__(APM_DO_ZERO_SEGS
15142 "pushl %%edi\n\t"
15143 "pushl %%ebp\n\t"
15144- "lcall *%%cs:apm_bios_entry\n\t"
15145+ "lcall *%%ss:apm_bios_entry\n\t"
15146 "setc %%bl\n\t"
15147 "popl %%ebp\n\t"
15148 "popl %%edi\n\t"
15149diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15150index 5e5cd12..51cdc93 100644
15151--- a/arch/x86/include/asm/atomic.h
15152+++ b/arch/x86/include/asm/atomic.h
15153@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15154 }
15155
15156 /**
15157+ * atomic_read_unchecked - read atomic variable
15158+ * @v: pointer of type atomic_unchecked_t
15159+ *
15160+ * Atomically reads the value of @v.
15161+ */
15162+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15163+{
15164+ return ACCESS_ONCE((v)->counter);
15165+}
15166+
15167+/**
15168 * atomic_set - set atomic variable
15169 * @v: pointer of type atomic_t
15170 * @i: required value
15171@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15172 }
15173
15174 /**
15175+ * atomic_set_unchecked - set atomic variable
15176+ * @v: pointer of type atomic_unchecked_t
15177+ * @i: required value
15178+ *
15179+ * Atomically sets the value of @v to @i.
15180+ */
15181+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15182+{
15183+ v->counter = i;
15184+}
15185+
15186+/**
15187 * atomic_add - add integer to atomic variable
15188 * @i: integer value to add
15189 * @v: pointer of type atomic_t
15190@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15191 */
15192 static inline void atomic_add(int i, atomic_t *v)
15193 {
15194- asm volatile(LOCK_PREFIX "addl %1,%0"
15195+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15196+
15197+#ifdef CONFIG_PAX_REFCOUNT
15198+ "jno 0f\n"
15199+ LOCK_PREFIX "subl %1,%0\n"
15200+ "int $4\n0:\n"
15201+ _ASM_EXTABLE(0b, 0b)
15202+#endif
15203+
15204+ : "+m" (v->counter)
15205+ : "ir" (i));
15206+}
15207+
15208+/**
15209+ * atomic_add_unchecked - add integer to atomic variable
15210+ * @i: integer value to add
15211+ * @v: pointer of type atomic_unchecked_t
15212+ *
15213+ * Atomically adds @i to @v.
15214+ */
15215+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15216+{
15217+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15218 : "+m" (v->counter)
15219 : "ir" (i));
15220 }
15221@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15222 */
15223 static inline void atomic_sub(int i, atomic_t *v)
15224 {
15225- asm volatile(LOCK_PREFIX "subl %1,%0"
15226+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15227+
15228+#ifdef CONFIG_PAX_REFCOUNT
15229+ "jno 0f\n"
15230+ LOCK_PREFIX "addl %1,%0\n"
15231+ "int $4\n0:\n"
15232+ _ASM_EXTABLE(0b, 0b)
15233+#endif
15234+
15235+ : "+m" (v->counter)
15236+ : "ir" (i));
15237+}
15238+
15239+/**
15240+ * atomic_sub_unchecked - subtract integer from atomic variable
15241+ * @i: integer value to subtract
15242+ * @v: pointer of type atomic_unchecked_t
15243+ *
15244+ * Atomically subtracts @i from @v.
15245+ */
15246+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15247+{
15248+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15249 : "+m" (v->counter)
15250 : "ir" (i));
15251 }
15252@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15253 */
15254 static inline int atomic_sub_and_test(int i, atomic_t *v)
15255 {
15256- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15257+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15258 }
15259
15260 /**
15261@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15262 */
15263 static inline void atomic_inc(atomic_t *v)
15264 {
15265- asm volatile(LOCK_PREFIX "incl %0"
15266+ asm volatile(LOCK_PREFIX "incl %0\n"
15267+
15268+#ifdef CONFIG_PAX_REFCOUNT
15269+ "jno 0f\n"
15270+ LOCK_PREFIX "decl %0\n"
15271+ "int $4\n0:\n"
15272+ _ASM_EXTABLE(0b, 0b)
15273+#endif
15274+
15275+ : "+m" (v->counter));
15276+}
15277+
15278+/**
15279+ * atomic_inc_unchecked - increment atomic variable
15280+ * @v: pointer of type atomic_unchecked_t
15281+ *
15282+ * Atomically increments @v by 1.
15283+ */
15284+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15285+{
15286+ asm volatile(LOCK_PREFIX "incl %0\n"
15287 : "+m" (v->counter));
15288 }
15289
15290@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15291 */
15292 static inline void atomic_dec(atomic_t *v)
15293 {
15294- asm volatile(LOCK_PREFIX "decl %0"
15295+ asm volatile(LOCK_PREFIX "decl %0\n"
15296+
15297+#ifdef CONFIG_PAX_REFCOUNT
15298+ "jno 0f\n"
15299+ LOCK_PREFIX "incl %0\n"
15300+ "int $4\n0:\n"
15301+ _ASM_EXTABLE(0b, 0b)
15302+#endif
15303+
15304+ : "+m" (v->counter));
15305+}
15306+
15307+/**
15308+ * atomic_dec_unchecked - decrement atomic variable
15309+ * @v: pointer of type atomic_unchecked_t
15310+ *
15311+ * Atomically decrements @v by 1.
15312+ */
15313+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15314+{
15315+ asm volatile(LOCK_PREFIX "decl %0\n"
15316 : "+m" (v->counter));
15317 }
15318
15319@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15320 */
15321 static inline int atomic_dec_and_test(atomic_t *v)
15322 {
15323- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15324+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15325 }
15326
15327 /**
15328@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15329 */
15330 static inline int atomic_inc_and_test(atomic_t *v)
15331 {
15332- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15333+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15334+}
15335+
15336+/**
15337+ * atomic_inc_and_test_unchecked - increment and test
15338+ * @v: pointer of type atomic_unchecked_t
15339+ *
15340+ * Atomically increments @v by 1
15341+ * and returns true if the result is zero, or false for all
15342+ * other cases.
15343+ */
15344+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15345+{
15346+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15347 }
15348
15349 /**
15350@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15351 */
15352 static inline int atomic_add_negative(int i, atomic_t *v)
15353 {
15354- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15355+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15356 }
15357
15358 /**
15359@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15360 *
15361 * Atomically adds @i to @v and returns @i + @v
15362 */
15363-static inline int atomic_add_return(int i, atomic_t *v)
15364+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15365+{
15366+ return i + xadd_check_overflow(&v->counter, i);
15367+}
15368+
15369+/**
15370+ * atomic_add_return_unchecked - add integer and return
15371+ * @i: integer value to add
15372+ * @v: pointer of type atomic_unchecked_t
15373+ *
15374+ * Atomically adds @i to @v and returns @i + @v
15375+ */
15376+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15377 {
15378 return i + xadd(&v->counter, i);
15379 }
15380@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15381 *
15382 * Atomically subtracts @i from @v and returns @v - @i
15383 */
15384-static inline int atomic_sub_return(int i, atomic_t *v)
15385+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15386 {
15387 return atomic_add_return(-i, v);
15388 }
15389
15390 #define atomic_inc_return(v) (atomic_add_return(1, v))
15391+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15392+{
15393+ return atomic_add_return_unchecked(1, v);
15394+}
15395 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15396
15397-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15398+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15399+{
15400+ return cmpxchg(&v->counter, old, new);
15401+}
15402+
15403+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15404 {
15405 return cmpxchg(&v->counter, old, new);
15406 }
15407@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15408 return xchg(&v->counter, new);
15409 }
15410
15411+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15412+{
15413+ return xchg(&v->counter, new);
15414+}
15415+
15416 /**
15417 * __atomic_add_unless - add unless the number is already a given value
15418 * @v: pointer of type atomic_t
15419@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15420 */
15421 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15422 {
15423- int c, old;
15424+ int c, old, new;
15425 c = atomic_read(v);
15426 for (;;) {
15427- if (unlikely(c == (u)))
15428+ if (unlikely(c == u))
15429 break;
15430- old = atomic_cmpxchg((v), c, c + (a));
15431+
15432+ asm volatile("addl %2,%0\n"
15433+
15434+#ifdef CONFIG_PAX_REFCOUNT
15435+ "jno 0f\n"
15436+ "subl %2,%0\n"
15437+ "int $4\n0:\n"
15438+ _ASM_EXTABLE(0b, 0b)
15439+#endif
15440+
15441+ : "=r" (new)
15442+ : "0" (c), "ir" (a));
15443+
15444+ old = atomic_cmpxchg(v, c, new);
15445 if (likely(old == c))
15446 break;
15447 c = old;
15448@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15449 }
15450
15451 /**
15452+ * atomic_inc_not_zero_hint - increment if not null
15453+ * @v: pointer of type atomic_t
15454+ * @hint: probable value of the atomic before the increment
15455+ *
15456+ * This version of atomic_inc_not_zero() gives a hint of the probable
15457+ * value of the atomic. This helps the processor avoid reading the memory
15458+ * before doing the atomic read/modify/write cycle, lowering the
15459+ * number of bus transactions on some arches.
15460+ *
15461+ * Returns: 0 if increment was not done, 1 otherwise.
15462+ */
15463+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15464+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15465+{
15466+ int val, c = hint, new;
15467+
15468+ /* sanity test, should be removed by compiler if hint is a constant */
15469+ if (!hint)
15470+ return __atomic_add_unless(v, 1, 0);
15471+
15472+ do {
15473+ asm volatile("incl %0\n"
15474+
15475+#ifdef CONFIG_PAX_REFCOUNT
15476+ "jno 0f\n"
15477+ "decl %0\n"
15478+ "int $4\n0:\n"
15479+ _ASM_EXTABLE(0b, 0b)
15480+#endif
15481+
15482+ : "=r" (new)
15483+ : "0" (c));
15484+
15485+ val = atomic_cmpxchg(v, c, new);
15486+ if (val == c)
15487+ return 1;
15488+ c = val;
15489+ } while (c);
15490+
15491+ return 0;
15492+}
15493+
15494+/**
15495 * atomic_inc_short - increment of a short integer
15496 * @v: pointer to type short int
15497 *
15498@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15499 }
15500
15501 /* These are x86-specific, used by some header files */
15502-#define atomic_clear_mask(mask, addr) \
15503- asm volatile(LOCK_PREFIX "andl %0,%1" \
15504- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15505+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15506+{
15507+ asm volatile(LOCK_PREFIX "andl %1,%0"
15508+ : "+m" (v->counter)
15509+ : "r" (~(mask))
15510+ : "memory");
15511+}
15512
15513-#define atomic_set_mask(mask, addr) \
15514- asm volatile(LOCK_PREFIX "orl %0,%1" \
15515- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15516- : "memory")
15517+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15518+{
15519+ asm volatile(LOCK_PREFIX "andl %1,%0"
15520+ : "+m" (v->counter)
15521+ : "r" (~(mask))
15522+ : "memory");
15523+}
15524+
15525+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15526+{
15527+ asm volatile(LOCK_PREFIX "orl %1,%0"
15528+ : "+m" (v->counter)
15529+ : "r" (mask)
15530+ : "memory");
15531+}
15532+
15533+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15534+{
15535+ asm volatile(LOCK_PREFIX "orl %1,%0"
15536+ : "+m" (v->counter)
15537+ : "r" (mask)
15538+ : "memory");
15539+}
15540
15541 #ifdef CONFIG_X86_32
15542 # include <asm/atomic64_32.h>
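Every PAX_REFCOUNT addition in this header follows one pattern: perform the locked operation, take the jno branch past a recovery path when the signed result did not overflow, otherwise undo the operation and raise int $4 (the overflow trap). A runnable userspace sketch of that exact pattern; the trap is replaced by a plain branch so the demo runs unprivileged:

#include <stdio.h>

static int checked_inc(int *counter)
{
    int overflowed = 0;
    asm volatile("lock incl %0\n\t"
                 "jno 1f\n\t"       /* no overflow: skip recovery      */
                 "lock decl %0\n\t" /* undo, just as the patch does    */
                 "movl $1, %1\n"    /* report instead of int $4        */
                 "1:"
                 : "+m" (*counter), "+r" (overflowed)
                 : : "cc");
    return overflowed;
}

int main(void)
{
    int c = 0x7fffffff; /* INT_MAX: the next increment overflows */
    printf("overflowed=%d, counter=%#x\n", checked_inc(&c), c);
    return 0;
}

The undo-then-trap design keeps the counter saturated at its pre-overflow value, so a reference count can never wrap to zero and trigger a use-after-free.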
15543diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15544index b154de7..bf18a5a 100644
15545--- a/arch/x86/include/asm/atomic64_32.h
15546+++ b/arch/x86/include/asm/atomic64_32.h
15547@@ -12,6 +12,14 @@ typedef struct {
15548 u64 __aligned(8) counter;
15549 } atomic64_t;
15550
15551+#ifdef CONFIG_PAX_REFCOUNT
15552+typedef struct {
15553+ u64 __aligned(8) counter;
15554+} atomic64_unchecked_t;
15555+#else
15556+typedef atomic64_t atomic64_unchecked_t;
15557+#endif
15558+
15559 #define ATOMIC64_INIT(val) { (val) }
15560
15561 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15562@@ -37,21 +45,31 @@ typedef struct {
15563 ATOMIC64_DECL_ONE(sym##_386)
15564
15565 ATOMIC64_DECL_ONE(add_386);
15566+ATOMIC64_DECL_ONE(add_unchecked_386);
15567 ATOMIC64_DECL_ONE(sub_386);
15568+ATOMIC64_DECL_ONE(sub_unchecked_386);
15569 ATOMIC64_DECL_ONE(inc_386);
15570+ATOMIC64_DECL_ONE(inc_unchecked_386);
15571 ATOMIC64_DECL_ONE(dec_386);
15572+ATOMIC64_DECL_ONE(dec_unchecked_386);
15573 #endif
15574
15575 #define alternative_atomic64(f, out, in...) \
15576 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15577
15578 ATOMIC64_DECL(read);
15579+ATOMIC64_DECL(read_unchecked);
15580 ATOMIC64_DECL(set);
15581+ATOMIC64_DECL(set_unchecked);
15582 ATOMIC64_DECL(xchg);
15583 ATOMIC64_DECL(add_return);
15584+ATOMIC64_DECL(add_return_unchecked);
15585 ATOMIC64_DECL(sub_return);
15586+ATOMIC64_DECL(sub_return_unchecked);
15587 ATOMIC64_DECL(inc_return);
15588+ATOMIC64_DECL(inc_return_unchecked);
15589 ATOMIC64_DECL(dec_return);
15590+ATOMIC64_DECL(dec_return_unchecked);
15591 ATOMIC64_DECL(dec_if_positive);
15592 ATOMIC64_DECL(inc_not_zero);
15593 ATOMIC64_DECL(add_unless);
15594@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15595 }
15596
15597 /**
15598+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15599+ * @v: pointer to type atomic64_unchecked_t
15600+ * @o: expected value
15601+ * @n: new value
15602+ *
15603+ * Atomically sets @v to @n if it was equal to @o and returns
15604+ * the old value.
15605+ */
15606+
15607+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15608+{
15609+ return cmpxchg64(&v->counter, o, n);
15610+}
15611+
15612+/**
15613 * atomic64_xchg - xchg atomic64 variable
15614 * @v: pointer to type atomic64_t
15615 * @n: value to assign
15616@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15617 }
15618
15619 /**
15620+ * atomic64_set_unchecked - set atomic64 variable
15621+ * @v: pointer to type atomic64_unchecked_t
15622+ * @n: value to assign
15623+ *
15624+ * Atomically sets the value of @v to @n.
15625+ */
15626+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15627+{
15628+ unsigned high = (unsigned)(i >> 32);
15629+ unsigned low = (unsigned)i;
15630+ alternative_atomic64(set, /* no output */,
15631+ "S" (v), "b" (low), "c" (high)
15632+ : "eax", "edx", "memory");
15633+}
15634+
15635+/**
15636 * atomic64_read - read atomic64 variable
15637 * @v: pointer to type atomic64_t
15638 *
15639@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15640 }
15641
15642 /**
15643+ * atomic64_read_unchecked - read atomic64 variable
15644+ * @v: pointer to type atomic64_unchecked_t
15645+ *
15646+ * Atomically reads the value of @v and returns it.
15647+ */
15648+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15649+{
15650+ long long r;
15651+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15652+ return r;
15653+}
15654+
15655+/**
15656 * atomic64_add_return - add and return
15657 * @i: integer value to add
15658 * @v: pointer to type atomic64_t
15659@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15660 return i;
15661 }
15662
15663+/**
15664+ * atomic64_add_return_unchecked - add and return
15665+ * @i: integer value to add
15666+ * @v: pointer to type atomic64_unchecked_t
15667+ *
15668+ * Atomically adds @i to @v and returns @i + @v
15669+ */
15670+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15671+{
15672+ alternative_atomic64(add_return_unchecked,
15673+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15674+ ASM_NO_INPUT_CLOBBER("memory"));
15675+ return i;
15676+}
15677+
15678 /*
15679 * Other variants with different arithmetic operators:
15680 */
15681@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15682 return a;
15683 }
15684
15685+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15686+{
15687+ long long a;
15688+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15689+ "S" (v) : "memory", "ecx");
15690+ return a;
15691+}
15692+
15693 static inline long long atomic64_dec_return(atomic64_t *v)
15694 {
15695 long long a;
15696@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15697 }
15698
15699 /**
15700+ * atomic64_add_unchecked - add integer to atomic64 variable
15701+ * @i: integer value to add
15702+ * @v: pointer to type atomic64_unchecked_t
15703+ *
15704+ * Atomically adds @i to @v.
15705+ */
15706+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15707+{
15708+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15709+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15710+ ASM_NO_INPUT_CLOBBER("memory"));
15711+ return i;
15712+}
15713+
15714+/**
15715 * atomic64_sub - subtract the atomic64 variable
15716 * @i: integer value to subtract
15717 * @v: pointer to type atomic64_t
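On 32-bit x86 these helpers are resolved out of line through the alternative_atomic64 machinery (cmpxchg8b where the CPU has it, a 386 fallback otherwise). A portable sketch of the guarantee add_return has to provide, written with C11 atomics rather than the kernel's machinery:

#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

static long long add_return64(_Atomic int64_t *v, long long i)
{
    int64_t old = atomic_load(v), new;
    do {
        new = old + i; /* retry until no other thread raced with us */
    } while (!atomic_compare_exchange_weak(v, &old, new));
    return new;
}

int main(void)
{
    _Atomic int64_t v = 5;
    printf("add_return64 -> %lld\n", add_return64(&v, 7)); /* prints 12 */
    return 0;
}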
15718diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15719index f8d273e..02f39f3 100644
15720--- a/arch/x86/include/asm/atomic64_64.h
15721+++ b/arch/x86/include/asm/atomic64_64.h
15722@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15723 }
15724
15725 /**
15726+ * atomic64_read_unchecked - read atomic64 variable
15727+ * @v: pointer of type atomic64_unchecked_t
15728+ *
15729+ * Atomically reads the value of @v.
15730+ * Doesn't imply a read memory barrier.
15731+ */
15732+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15733+{
15734+ return ACCESS_ONCE((v)->counter);
15735+}
15736+
15737+/**
15738 * atomic64_set - set atomic64 variable
15739 * @v: pointer to type atomic64_t
15740 * @i: required value
15741@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15742 }
15743
15744 /**
15745+ * atomic64_set_unchecked - set atomic64 variable
15746+ * @v: pointer to type atomic64_unchecked_t
15747+ * @i: required value
15748+ *
15749+ * Atomically sets the value of @v to @i.
15750+ */
15751+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15752+{
15753+ v->counter = i;
15754+}
15755+
15756+/**
15757 * atomic64_add - add integer to atomic64 variable
15758 * @i: integer value to add
15759 * @v: pointer to type atomic64_t
15760@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15761 */
15762 static inline void atomic64_add(long i, atomic64_t *v)
15763 {
15764+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15765+
15766+#ifdef CONFIG_PAX_REFCOUNT
15767+ "jno 0f\n"
15768+ LOCK_PREFIX "subq %1,%0\n"
15769+ "int $4\n0:\n"
15770+ _ASM_EXTABLE(0b, 0b)
15771+#endif
15772+
15773+ : "=m" (v->counter)
15774+ : "er" (i), "m" (v->counter));
15775+}
15776+
15777+/**
15778+ * atomic64_add_unchecked - add integer to atomic64 variable
15779+ * @i: integer value to add
15780+ * @v: pointer to type atomic64_unchecked_t
15781+ *
15782+ * Atomically adds @i to @v.
15783+ */
15784+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15785+{
15786 asm volatile(LOCK_PREFIX "addq %1,%0"
15787 : "=m" (v->counter)
15788 : "er" (i), "m" (v->counter));
15789@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15790 */
15791 static inline void atomic64_sub(long i, atomic64_t *v)
15792 {
15793- asm volatile(LOCK_PREFIX "subq %1,%0"
15794+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15795+
15796+#ifdef CONFIG_PAX_REFCOUNT
15797+ "jno 0f\n"
15798+ LOCK_PREFIX "addq %1,%0\n"
15799+ "int $4\n0:\n"
15800+ _ASM_EXTABLE(0b, 0b)
15801+#endif
15802+
15803+ : "=m" (v->counter)
15804+ : "er" (i), "m" (v->counter));
15805+}
15806+
15807+/**
15808+ * atomic64_sub_unchecked - subtract the atomic64 variable
15809+ * @i: integer value to subtract
15810+ * @v: pointer to type atomic64_unchecked_t
15811+ *
15812+ * Atomically subtracts @i from @v.
15813+ */
15814+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15815+{
15816+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15817 : "=m" (v->counter)
15818 : "er" (i), "m" (v->counter));
15819 }
15820@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15821 */
15822 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15823 {
15824- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15825+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15826 }
15827
15828 /**
15829@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15830 */
15831 static inline void atomic64_inc(atomic64_t *v)
15832 {
15833+ asm volatile(LOCK_PREFIX "incq %0\n"
15834+
15835+#ifdef CONFIG_PAX_REFCOUNT
15836+ "jno 0f\n"
15837+ LOCK_PREFIX "decq %0\n"
15838+ "int $4\n0:\n"
15839+ _ASM_EXTABLE(0b, 0b)
15840+#endif
15841+
15842+ : "=m" (v->counter)
15843+ : "m" (v->counter));
15844+}
15845+
15846+/**
15847+ * atomic64_inc_unchecked - increment atomic64 variable
15848+ * @v: pointer to type atomic64_unchecked_t
15849+ *
15850+ * Atomically increments @v by 1.
15851+ */
15852+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15853+{
15854 asm volatile(LOCK_PREFIX "incq %0"
15855 : "=m" (v->counter)
15856 : "m" (v->counter));
15857@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15858 */
15859 static inline void atomic64_dec(atomic64_t *v)
15860 {
15861- asm volatile(LOCK_PREFIX "decq %0"
15862+ asm volatile(LOCK_PREFIX "decq %0\n"
15863+
15864+#ifdef CONFIG_PAX_REFCOUNT
15865+ "jno 0f\n"
15866+ LOCK_PREFIX "incq %0\n"
15867+ "int $4\n0:\n"
15868+ _ASM_EXTABLE(0b, 0b)
15869+#endif
15870+
15871+ : "=m" (v->counter)
15872+ : "m" (v->counter));
15873+}
15874+
15875+/**
15876+ * atomic64_dec_unchecked - decrement atomic64 variable
15877+ * @v: pointer to type atomic64_unchecked_t
15878+ *
15879+ * Atomically decrements @v by 1.
15880+ */
15881+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15882+{
15883+ asm volatile(LOCK_PREFIX "decq %0\n"
15884 : "=m" (v->counter)
15885 : "m" (v->counter));
15886 }
15887@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15888 */
15889 static inline int atomic64_dec_and_test(atomic64_t *v)
15890 {
15891- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15892+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15893 }
15894
15895 /**
15896@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15897 */
15898 static inline int atomic64_inc_and_test(atomic64_t *v)
15899 {
15900- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15901+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15902 }
15903
15904 /**
15905@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15906 */
15907 static inline int atomic64_add_negative(long i, atomic64_t *v)
15908 {
15909- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15910+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15911 }
15912
15913 /**
15914@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15915 */
15916 static inline long atomic64_add_return(long i, atomic64_t *v)
15917 {
15918+ return i + xadd_check_overflow(&v->counter, i);
15919+}
15920+
15921+/**
15922+ * atomic64_add_return_unchecked - add and return
15923+ * @i: integer value to add
15924+ * @v: pointer to type atomic64_unchecked_t
15925+ *
15926+ * Atomically adds @i to @v and returns @i + @v
15927+ */
15928+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15929+{
15930 return i + xadd(&v->counter, i);
15931 }
15932
15933@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15934 }
15935
15936 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15937+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15938+{
15939+ return atomic64_add_return_unchecked(1, v);
15940+}
15941 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15942
15943 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15944@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15945 return cmpxchg(&v->counter, old, new);
15946 }
15947
15948+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15949+{
15950+ return cmpxchg(&v->counter, old, new);
15951+}
15952+
15953 static inline long atomic64_xchg(atomic64_t *v, long new)
15954 {
15955 return xchg(&v->counter, new);
15956@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15957 */
15958 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15959 {
15960- long c, old;
15961+ long c, old, new;
15962 c = atomic64_read(v);
15963 for (;;) {
15964- if (unlikely(c == (u)))
15965+ if (unlikely(c == u))
15966 break;
15967- old = atomic64_cmpxchg((v), c, c + (a));
15968+
15969+ asm volatile("add %2,%0\n"
15970+
15971+#ifdef CONFIG_PAX_REFCOUNT
15972+ "jno 0f\n"
15973+ "sub %2,%0\n"
15974+ "int $4\n0:\n"
15975+ _ASM_EXTABLE(0b, 0b)
15976+#endif
15977+
15978+ : "=r" (new)
15979+ : "0" (c), "ir" (a));
15980+
15981+ old = atomic64_cmpxchg(v, c, new);
15982 if (likely(old == c))
15983 break;
15984 c = old;
15985 }
15986- return c != (u);
15987+ return c != u;
15988 }
15989
15990 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
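The rewritten atomic64_add_unless above keeps the classic cmpxchg retry loop but computes the candidate value through an overflow-checked add. A hedged C rendering of the same loop; __builtin_add_overflow stands in for the jno/int $4 sequence, and an error return replaces the kernel's trap:

#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

static int add_unless(_Atomic int64_t *v, long long a, long long u)
{
    int64_t c = atomic_load(v), new;
    for (;;) {
        if (c == u)
            return 0;  /* matched the excluded value, do nothing */
        if (__builtin_add_overflow(c, a, &new))
            return -1; /* would overflow: bail out instead of trapping */
        if (atomic_compare_exchange_weak(v, &c, new))
            return 1;  /* c is refreshed with the current value on failure */
    }
}

int main(void)
{
    _Atomic int64_t v = 3;
    printf("%d %d\n", add_unless(&v, 1, 3), add_unless(&v, 1, 99)); /* 0 1 */
    return 0;
}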
15991diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
15992index 2ab1eb3..1e8cc5d 100644
15993--- a/arch/x86/include/asm/barrier.h
15994+++ b/arch/x86/include/asm/barrier.h
15995@@ -57,7 +57,7 @@
15996 do { \
15997 compiletime_assert_atomic_type(*p); \
15998 smp_mb(); \
15999- ACCESS_ONCE(*p) = (v); \
16000+ ACCESS_ONCE_RW(*p) = (v); \
16001 } while (0)
16002
16003 #define smp_load_acquire(p) \
16004@@ -74,7 +74,7 @@ do { \
16005 do { \
16006 compiletime_assert_atomic_type(*p); \
16007 barrier(); \
16008- ACCESS_ONCE(*p) = (v); \
16009+ ACCESS_ONCE_RW(*p) = (v); \
16010 } while (0)
16011
16012 #define smp_load_acquire(p) \
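This hunk only swaps ACCESS_ONCE for ACCESS_ONCE_RW, grsecurity's store-capable variant (the plain form casts through a const-qualified volatile pointer under constification, so it cannot be written through); the release/acquire pairing itself is unchanged. For reference, the same pairing in plain C11, compiled with -pthread:

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

static int payload;
static _Atomic int ready;

static void *producer(void *arg)
{
    payload = 42;                                            /* plain write */
    atomic_store_explicit(&ready, 1, memory_order_release);  /* publish     */
    return arg;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, producer, NULL);
    while (!atomic_load_explicit(&ready, memory_order_acquire))
        ; /* spin until the producer publishes */
    printf("payload = %d\n", payload); /* guaranteed to print 42 */
    pthread_join(t, NULL);
    return 0;
}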
16013diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16014index cfe3b95..d01b118 100644
16015--- a/arch/x86/include/asm/bitops.h
16016+++ b/arch/x86/include/asm/bitops.h
16017@@ -50,7 +50,7 @@
16018 * a mask operation on a byte.
16019 */
16020 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16021-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16022+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16023 #define CONST_MASK(nr) (1 << ((nr) & 7))
16024
16025 /**
16026@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16027 */
16028 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16029 {
16030- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16031+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16032 }
16033
16034 /**
16035@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16036 */
16037 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16038 {
16039- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16040+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16041 }
16042
16043 /**
16044@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16045 */
16046 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16047 {
16048- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16049+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16050 }
16051
16052 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16053@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16054 *
16055 * Undefined if no bit exists, so code should check against 0 first.
16056 */
16057-static inline unsigned long __ffs(unsigned long word)
16058+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16059 {
16060 asm("rep; bsf %1,%0"
16061 : "=r" (word)
16062@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16063 *
16064 * Undefined if no zero exists, so code should check against ~0UL first.
16065 */
16066-static inline unsigned long ffz(unsigned long word)
16067+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16068 {
16069 asm("rep; bsf %1,%0"
16070 : "=r" (word)
16071@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16072 *
16073 * Undefined if no set bit exists, so code should check against 0 first.
16074 */
16075-static inline unsigned long __fls(unsigned long word)
16076+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16077 {
16078 asm("bsr %1,%0"
16079 : "=r" (word)
16080@@ -434,7 +434,7 @@ static inline int ffs(int x)
16081 * set bit if value is nonzero. The last (most significant) bit is
16082 * at position 32.
16083 */
16084-static inline int fls(int x)
16085+static inline int __intentional_overflow(-1) fls(int x)
16086 {
16087 int r;
16088
16089@@ -476,7 +476,7 @@ static inline int fls(int x)
16090 * at position 64.
16091 */
16092 #ifdef CONFIG_X86_64
16093-static __always_inline int fls64(__u64 x)
16094+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16095 {
16096 int bitpos = -1;
16097 /*
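The __intentional_overflow(-1) markers above only annotate the bit-scan helpers for the size-overflow plugin; their semantics stay as documented: __ffs returns the index of the lowest set bit, ffz the index of the lowest clear bit, and both are undefined when no such bit exists. A quick check of those semantics with GCC builtins as stand-ins for the bsf-based code:

#include <stdio.h>

static unsigned long my_ffs(unsigned long w) { return __builtin_ctzl(w);  }
static unsigned long my_ffz(unsigned long w) { return __builtin_ctzl(~w); }

int main(void)
{
    printf("__ffs(0x18) = %lu\n", my_ffs(0x18)); /* lowest set bit: 3   */
    printf("ffz(0x07)   = %lu\n", my_ffz(0x07)); /* lowest clear bit: 3 */
    return 0;
}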
16098diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16099index 4fa687a..60f2d39 100644
16100--- a/arch/x86/include/asm/boot.h
16101+++ b/arch/x86/include/asm/boot.h
16102@@ -6,10 +6,15 @@
16103 #include <uapi/asm/boot.h>
16104
16105 /* Physical address where kernel should be loaded. */
16106-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16107+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16108 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16109 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16110
16111+#ifndef __ASSEMBLY__
16112+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16113+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16114+#endif
16115+
16116 /* Minimum kernel alignment, as a power of two */
16117 #ifdef CONFIG_X86_64
16118 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16119diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16120index 48f99f1..d78ebf9 100644
16121--- a/arch/x86/include/asm/cache.h
16122+++ b/arch/x86/include/asm/cache.h
16123@@ -5,12 +5,13 @@
16124
16125 /* L1 cache line size */
16126 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16127-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16128+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16129
16130 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16131+#define __read_only __attribute__((__section__(".data..read_only")))
16132
16133 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16134-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16135+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16136
16137 #ifdef CONFIG_X86_VSMP
16138 #ifdef CONFIG_SMP
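__read_only, added above, uses the same section-placement attribute as __read_mostly; elsewhere in the patch the kernel write-protects .data..read_only after boot. The attribute mechanics in isolation, with a demo section name of my own choosing; userspace has no pass that protects the section, so this shows placement only:

#include <stdio.h>

#define __demo_read_only __attribute__((__section__(".data.read_only_demo")))

static int important __demo_read_only = 123;

int main(void)
{
    printf("important = %d\n", important);
    return 0;
}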
16139diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16140index 1f1297b..72b8439 100644
16141--- a/arch/x86/include/asm/calling.h
16142+++ b/arch/x86/include/asm/calling.h
16143@@ -82,106 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16144 #define RSP 152
16145 #define SS 160
16146
16147-#define ARGOFFSET R11
16148+#define ARGOFFSET R15
16149
16150 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16151- subq $9*8+\addskip, %rsp
16152- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16153- movq_cfi rdi, 8*8
16154- movq_cfi rsi, 7*8
16155- movq_cfi rdx, 6*8
16156+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16157+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16158+ movq_cfi rdi, RDI
16159+ movq_cfi rsi, RSI
16160+ movq_cfi rdx, RDX
16161
16162 .if \save_rcx
16163- movq_cfi rcx, 5*8
16164+ movq_cfi rcx, RCX
16165 .endif
16166
16167 .if \rax_enosys
16168- movq $-ENOSYS, 4*8(%rsp)
16169+ movq $-ENOSYS, RAX(%rsp)
16170 .else
16171- movq_cfi rax, 4*8
16172+ movq_cfi rax, RAX
16173 .endif
16174
16175 .if \save_r891011
16176- movq_cfi r8, 3*8
16177- movq_cfi r9, 2*8
16178- movq_cfi r10, 1*8
16179- movq_cfi r11, 0*8
16180+ movq_cfi r8, R8
16181+ movq_cfi r9, R9
16182+ movq_cfi r10, R10
16183+ movq_cfi r11, R11
16184 .endif
16185
16186+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16187+ movq_cfi r12, R12
16188+#endif
16189+
16190 .endm
16191
16192-#define ARG_SKIP (9*8)
16193+#define ARG_SKIP ORIG_RAX
16194
16195 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16196 rstor_r8910=1, rstor_rdx=1
16197+
16198+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16199+ movq_cfi_restore R12, r12
16200+#endif
16201+
16202 .if \rstor_r11
16203- movq_cfi_restore 0*8, r11
16204+ movq_cfi_restore R11, r11
16205 .endif
16206
16207 .if \rstor_r8910
16208- movq_cfi_restore 1*8, r10
16209- movq_cfi_restore 2*8, r9
16210- movq_cfi_restore 3*8, r8
16211+ movq_cfi_restore R10, r10
16212+ movq_cfi_restore R9, r9
16213+ movq_cfi_restore R8, r8
16214 .endif
16215
16216 .if \rstor_rax
16217- movq_cfi_restore 4*8, rax
16218+ movq_cfi_restore RAX, rax
16219 .endif
16220
16221 .if \rstor_rcx
16222- movq_cfi_restore 5*8, rcx
16223+ movq_cfi_restore RCX, rcx
16224 .endif
16225
16226 .if \rstor_rdx
16227- movq_cfi_restore 6*8, rdx
16228+ movq_cfi_restore RDX, rdx
16229 .endif
16230
16231- movq_cfi_restore 7*8, rsi
16232- movq_cfi_restore 8*8, rdi
16233+ movq_cfi_restore RSI, rsi
16234+ movq_cfi_restore RDI, rdi
16235
16236- .if ARG_SKIP+\addskip > 0
16237- addq $ARG_SKIP+\addskip, %rsp
16238- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16239+ .if ORIG_RAX+\addskip > 0
16240+ addq $ORIG_RAX+\addskip, %rsp
16241+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16242 .endif
16243 .endm
16244
16245- .macro LOAD_ARGS offset, skiprax=0
16246- movq \offset(%rsp), %r11
16247- movq \offset+8(%rsp), %r10
16248- movq \offset+16(%rsp), %r9
16249- movq \offset+24(%rsp), %r8
16250- movq \offset+40(%rsp), %rcx
16251- movq \offset+48(%rsp), %rdx
16252- movq \offset+56(%rsp), %rsi
16253- movq \offset+64(%rsp), %rdi
16254+ .macro LOAD_ARGS skiprax=0
16255+ movq R11(%rsp), %r11
16256+ movq R10(%rsp), %r10
16257+ movq R9(%rsp), %r9
16258+ movq R8(%rsp), %r8
16259+ movq RCX(%rsp), %rcx
16260+ movq RDX(%rsp), %rdx
16261+ movq RSI(%rsp), %rsi
16262+ movq RDI(%rsp), %rdi
16263 .if \skiprax
16264 .else
16265- movq \offset+72(%rsp), %rax
16266+ movq ORIG_RAX(%rsp), %rax
16267 .endif
16268 .endm
16269
16270-#define REST_SKIP (6*8)
16271-
16272 .macro SAVE_REST
16273- subq $REST_SKIP, %rsp
16274- CFI_ADJUST_CFA_OFFSET REST_SKIP
16275- movq_cfi rbx, 5*8
16276- movq_cfi rbp, 4*8
16277- movq_cfi r12, 3*8
16278- movq_cfi r13, 2*8
16279- movq_cfi r14, 1*8
16280- movq_cfi r15, 0*8
16281+ movq_cfi rbx, RBX
16282+ movq_cfi rbp, RBP
16283+
16284+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16285+ movq_cfi r12, R12
16286+#endif
16287+
16288+ movq_cfi r13, R13
16289+ movq_cfi r14, R14
16290+ movq_cfi r15, R15
16291 .endm
16292
16293 .macro RESTORE_REST
16294- movq_cfi_restore 0*8, r15
16295- movq_cfi_restore 1*8, r14
16296- movq_cfi_restore 2*8, r13
16297- movq_cfi_restore 3*8, r12
16298- movq_cfi_restore 4*8, rbp
16299- movq_cfi_restore 5*8, rbx
16300- addq $REST_SKIP, %rsp
16301- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16302+ movq_cfi_restore R15, r15
16303+ movq_cfi_restore R14, r14
16304+ movq_cfi_restore R13, r13
16305+
16306+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16307+ movq_cfi_restore R12, r12
16308+#endif
16309+
16310+ movq_cfi_restore RBP, rbp
16311+ movq_cfi_restore RBX, rbx
16312 .endm
16313
16314 .macro SAVE_ALL
16315diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16316index f50de69..2b0a458 100644
16317--- a/arch/x86/include/asm/checksum_32.h
16318+++ b/arch/x86/include/asm/checksum_32.h
16319@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16320 int len, __wsum sum,
16321 int *src_err_ptr, int *dst_err_ptr);
16322
16323+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16324+ int len, __wsum sum,
16325+ int *src_err_ptr, int *dst_err_ptr);
16326+
16327+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16328+ int len, __wsum sum,
16329+ int *src_err_ptr, int *dst_err_ptr);
16330+
16331 /*
16332 * Note: when you get a NULL pointer exception here this means someone
16333 * passed in an incorrect kernel address to one of these functions.
16334@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16335
16336 might_sleep();
16337 stac();
16338- ret = csum_partial_copy_generic((__force void *)src, dst,
16339+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16340 len, sum, err_ptr, NULL);
16341 clac();
16342
16343@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16344 might_sleep();
16345 if (access_ok(VERIFY_WRITE, dst, len)) {
16346 stac();
16347- ret = csum_partial_copy_generic(src, (__force void *)dst,
16348+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16349 len, sum, NULL, err_ptr);
16350 clac();
16351 return ret;
16352diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16353index 99c105d7..2f667ac 100644
16354--- a/arch/x86/include/asm/cmpxchg.h
16355+++ b/arch/x86/include/asm/cmpxchg.h
16356@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16357 __compiletime_error("Bad argument size for cmpxchg");
16358 extern void __xadd_wrong_size(void)
16359 __compiletime_error("Bad argument size for xadd");
16360+extern void __xadd_check_overflow_wrong_size(void)
16361+ __compiletime_error("Bad argument size for xadd_check_overflow");
16362 extern void __add_wrong_size(void)
16363 __compiletime_error("Bad argument size for add");
16364+extern void __add_check_overflow_wrong_size(void)
16365+ __compiletime_error("Bad argument size for add_check_overflow");
16366
16367 /*
16368 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16369@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16370 __ret; \
16371 })
16372
16373+#ifdef CONFIG_PAX_REFCOUNT
16374+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16375+ ({ \
16376+ __typeof__ (*(ptr)) __ret = (arg); \
16377+ switch (sizeof(*(ptr))) { \
16378+ case __X86_CASE_L: \
16379+ asm volatile (lock #op "l %0, %1\n" \
16380+ "jno 0f\n" \
16381+ "mov %0,%1\n" \
16382+ "int $4\n0:\n" \
16383+ _ASM_EXTABLE(0b, 0b) \
16384+ : "+r" (__ret), "+m" (*(ptr)) \
16385+ : : "memory", "cc"); \
16386+ break; \
16387+ case __X86_CASE_Q: \
16388+ asm volatile (lock #op "q %q0, %1\n" \
16389+ "jno 0f\n" \
16390+ "mov %0,%1\n" \
16391+ "int $4\n0:\n" \
16392+ _ASM_EXTABLE(0b, 0b) \
16393+ : "+r" (__ret), "+m" (*(ptr)) \
16394+ : : "memory", "cc"); \
16395+ break; \
16396+ default: \
16397+ __ ## op ## _check_overflow_wrong_size(); \
16398+ } \
16399+ __ret; \
16400+ })
16401+#else
16402+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16403+#endif
16404+
16405 /*
16406 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16407 * Since this is generally used to protect other memory information, we
16408@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16409 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16410 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16411
16412+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16413+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16414+
16415 #define __add(ptr, inc, lock) \
16416 ({ \
16417 __typeof__ (*(ptr)) __ret = (inc); \
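xadd_check_overflow keeps xadd's contract of returning the old value, which is why callers such as atomic_add_return compute i + xadd_check_overflow(&v->counter, i) to get the new value. The same old-value contract shown with the GCC builtin; there is no overflow trap in this sketch:

#include <stdio.h>

int main(void)
{
    int counter = 10, i = 5;
    int old = __atomic_fetch_add(&counter, i, __ATOMIC_SEQ_CST);
    printf("old=%d new=%d (i + old = %d)\n", old, counter, i + old);
    return 0;
}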
16418diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16419index 59c6c40..5e0b22c 100644
16420--- a/arch/x86/include/asm/compat.h
16421+++ b/arch/x86/include/asm/compat.h
16422@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16423 typedef u32 compat_uint_t;
16424 typedef u32 compat_ulong_t;
16425 typedef u64 __attribute__((aligned(4))) compat_u64;
16426-typedef u32 compat_uptr_t;
16427+typedef u32 __user compat_uptr_t;
16428
16429 struct compat_timespec {
16430 compat_time_t tv_sec;
16431diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16432index 90a5485..43b6211 100644
16433--- a/arch/x86/include/asm/cpufeature.h
16434+++ b/arch/x86/include/asm/cpufeature.h
16435@@ -213,7 +213,7 @@
16436 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16437 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16438 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16439-
16440+#define X86_FEATURE_STRONGUDEREF ( 8*32+31) /* PaX PCID based strong UDEREF */
16441
16442 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16443 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16444@@ -221,7 +221,7 @@
16445 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16446 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16447 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16448-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16449+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16450 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16451 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16452 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16453@@ -390,6 +390,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16454 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16455 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16456 #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
16457+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16458
16459 #if __GNUC__ >= 4
16460 extern void warn_pre_alternatives(void);
16461@@ -441,7 +442,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16462
16463 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16464 t_warn:
16465- warn_pre_alternatives();
16466+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16467+ warn_pre_alternatives();
16468 return false;
16469 #endif
16470
16471@@ -461,7 +463,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16472 ".section .discard,\"aw\",@progbits\n"
16473 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16474 ".previous\n"
16475- ".section .altinstr_replacement,\"ax\"\n"
16476+ ".section .altinstr_replacement,\"a\"\n"
16477 "3: movb $1,%0\n"
16478 "4:\n"
16479 ".previous\n"
16480@@ -498,7 +500,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16481 " .byte 2b - 1b\n" /* src len */
16482 " .byte 4f - 3f\n" /* repl len */
16483 ".previous\n"
16484- ".section .altinstr_replacement,\"ax\"\n"
16485+ ".section .altinstr_replacement,\"a\"\n"
16486 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16487 "4:\n"
16488 ".previous\n"
16489@@ -531,7 +533,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16490 ".section .discard,\"aw\",@progbits\n"
16491 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16492 ".previous\n"
16493- ".section .altinstr_replacement,\"ax\"\n"
16494+ ".section .altinstr_replacement,\"a\"\n"
16495 "3: movb $0,%0\n"
16496 "4:\n"
16497 ".previous\n"
16498@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16499 ".section .discard,\"aw\",@progbits\n"
16500 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16501 ".previous\n"
16502- ".section .altinstr_replacement,\"ax\"\n"
16503+ ".section .altinstr_replacement,\"a\"\n"
16504 "5: movb $1,%0\n"
16505 "6:\n"
16506 ".previous\n"
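cpu_has_pcid, added above, just tests a CPUID-derived feature bit (PCID is reported in leaf 1, ECX bit 17). A userspace equivalent via GCC's cpuid.h:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 1;
    printf("PCID supported: %s\n", (ecx & (1u << 17)) ? "yes" : "no");
    return 0;
}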
16507diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16508index a94b82e..59ecefa 100644
16509--- a/arch/x86/include/asm/desc.h
16510+++ b/arch/x86/include/asm/desc.h
16511@@ -4,6 +4,7 @@
16512 #include <asm/desc_defs.h>
16513 #include <asm/ldt.h>
16514 #include <asm/mmu.h>
16515+#include <asm/pgtable.h>
16516
16517 #include <linux/smp.h>
16518 #include <linux/percpu.h>
16519@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16520
16521 desc->type = (info->read_exec_only ^ 1) << 1;
16522 desc->type |= info->contents << 2;
16523+ desc->type |= info->seg_not_present ^ 1;
16524
16525 desc->s = 1;
16526 desc->dpl = 0x3;
16527@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16528 }
16529
16530 extern struct desc_ptr idt_descr;
16531-extern gate_desc idt_table[];
16532-extern struct desc_ptr debug_idt_descr;
16533-extern gate_desc debug_idt_table[];
16534-
16535-struct gdt_page {
16536- struct desc_struct gdt[GDT_ENTRIES];
16537-} __attribute__((aligned(PAGE_SIZE)));
16538-
16539-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16540+extern gate_desc idt_table[IDT_ENTRIES];
16541+extern const struct desc_ptr debug_idt_descr;
16542+extern gate_desc debug_idt_table[IDT_ENTRIES];
16543
16544+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16545 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16546 {
16547- return per_cpu(gdt_page, cpu).gdt;
16548+ return cpu_gdt_table[cpu];
16549 }
16550
16551 #ifdef CONFIG_X86_64
16552@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16553 unsigned long base, unsigned dpl, unsigned flags,
16554 unsigned short seg)
16555 {
16556- gate->a = (seg << 16) | (base & 0xffff);
16557- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16558+ gate->gate.offset_low = base;
16559+ gate->gate.seg = seg;
16560+ gate->gate.reserved = 0;
16561+ gate->gate.type = type;
16562+ gate->gate.s = 0;
16563+ gate->gate.dpl = dpl;
16564+ gate->gate.p = 1;
16565+ gate->gate.offset_high = base >> 16;
16566 }
16567
16568 #endif
16569@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16570
16571 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16572 {
16573+ pax_open_kernel();
16574 memcpy(&idt[entry], gate, sizeof(*gate));
16575+ pax_close_kernel();
16576 }
16577
16578 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16579 {
16580+ pax_open_kernel();
16581 memcpy(&ldt[entry], desc, 8);
16582+ pax_close_kernel();
16583 }
16584
16585 static inline void
16586@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16587 default: size = sizeof(*gdt); break;
16588 }
16589
16590+ pax_open_kernel();
16591 memcpy(&gdt[entry], desc, size);
16592+ pax_close_kernel();
16593 }
16594
16595 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16596@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16597
16598 static inline void native_load_tr_desc(void)
16599 {
16600+ pax_open_kernel();
16601 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16602+ pax_close_kernel();
16603 }
16604
16605 static inline void native_load_gdt(const struct desc_ptr *dtr)
16606@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16607 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16608 unsigned int i;
16609
16610+ pax_open_kernel();
16611 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16612 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16613+ pax_close_kernel();
16614 }
16615
16616 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16617@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16618 preempt_enable();
16619 }
16620
16621-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16622+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16623 {
16624 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16625 }
16626@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16627 }
16628
16629 #ifdef CONFIG_X86_64
16630-static inline void set_nmi_gate(int gate, void *addr)
16631+static inline void set_nmi_gate(int gate, const void *addr)
16632 {
16633 gate_desc s;
16634
16635@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16636 #endif
16637
16638 #ifdef CONFIG_TRACING
16639-extern struct desc_ptr trace_idt_descr;
16640-extern gate_desc trace_idt_table[];
16641+extern const struct desc_ptr trace_idt_descr;
16642+extern gate_desc trace_idt_table[IDT_ENTRIES];
16643 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16644 {
16645 write_idt_entry(trace_idt_table, entry, gate);
16646 }
16647
16648-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16649+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16650 unsigned dpl, unsigned ist, unsigned seg)
16651 {
16652 gate_desc s;
16653@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16654 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16655 #endif
16656
16657-static inline void _set_gate(int gate, unsigned type, void *addr,
16658+static inline void _set_gate(int gate, unsigned type, const void *addr,
16659 unsigned dpl, unsigned ist, unsigned seg)
16660 {
16661 gate_desc s;
16662@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16663 #define set_intr_gate(n, addr) \
16664 do { \
16665 BUG_ON((unsigned)n > 0xFF); \
16666- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16667+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16668 __KERNEL_CS); \
16669- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16670+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16671 0, 0, __KERNEL_CS); \
16672 } while (0)
16673
16674@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16675 /*
16676 * This routine sets up an interrupt gate at directory privilege level 3.
16677 */
16678-static inline void set_system_intr_gate(unsigned int n, void *addr)
16679+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16680 {
16681 BUG_ON((unsigned)n > 0xFF);
16682 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16683 }
16684
16685-static inline void set_system_trap_gate(unsigned int n, void *addr)
16686+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16687 {
16688 BUG_ON((unsigned)n > 0xFF);
16689 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16690 }
16691
16692-static inline void set_trap_gate(unsigned int n, void *addr)
16693+static inline void set_trap_gate(unsigned int n, const void *addr)
16694 {
16695 BUG_ON((unsigned)n > 0xFF);
16696 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16697@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16698 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16699 {
16700 BUG_ON((unsigned)n > 0xFF);
16701- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16702+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16703 }
16704
16705-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16706+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16707 {
16708 BUG_ON((unsigned)n > 0xFF);
16709 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16710 }
16711
16712-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16713+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16714 {
16715 BUG_ON((unsigned)n > 0xFF);
16716 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16717@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16718 else
16719 load_idt((const struct desc_ptr *)&idt_descr);
16720 }
16721+
16722+#ifdef CONFIG_X86_32
16723+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16724+{
16725+ struct desc_struct d;
16726+
16727+ if (likely(limit))
16728+ limit = (limit - 1UL) >> PAGE_SHIFT;
16729+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16730+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16731+}
16732+#endif
16733+
16734 #endif /* _ASM_X86_DESC_H */
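[The set_user_cs() helper added above repacks GDT_ENTRY_DEFAULT_USER_CS so PaX can shrink the user code segment limit on 32-bit (SEGMEXEC/PAGEEXEC). A standalone sketch of the descriptor encoding that pack_descriptor() performs for the 0xFB/0xC arguments — DPL-3 32-bit code segment with 4K-granular limit; note the helper pre-shifts the byte limit to pages first:

#include <stdint.h>
#include <stdio.h>

/* Encode base/limit into the two 32-bit words of an x86 segment
 * descriptor. type 0xFB = present, DPL 3, code, read/exec, accessed;
 * flags 0xC = G (4K granularity) + D (32-bit operand size). */
static void pack_user_cs(uint32_t base, uint32_t limit_pages,
			 uint32_t *lo, uint32_t *hi)
{
	*lo = ((base & 0xffff) << 16) | (limit_pages & 0xffff);
	*hi = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
	      (limit_pages & 0xf0000) | (0xFBu << 8) | (0xCu << 20);
}

int main(void)
{
	uint32_t lo, hi;

	pack_user_cs(0, 0xbffff, &lo, &hi);	/* 3 GiB limit, in pages */
	printf("%08x %08x\n", hi, lo);
	return 0;
}
]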
16735diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16736index 278441f..b95a174 100644
16737--- a/arch/x86/include/asm/desc_defs.h
16738+++ b/arch/x86/include/asm/desc_defs.h
16739@@ -31,6 +31,12 @@ struct desc_struct {
16740 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16741 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16742 };
16743+ struct {
16744+ u16 offset_low;
16745+ u16 seg;
16746+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16747+ unsigned offset_high: 16;
16748+ } gate;
16749 };
16750 } __attribute__((packed));
16751
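[The new anonymous-union member gives desc_struct a third view: the same 8 bytes read as an IDT gate, which lets later PaX code inspect handler addresses in place. A userspace sketch of that view and of reassembling the 32-bit offset (field layout copied from the hunk; the 16-byte x86-64 gate_desc is not covered here):

#include <stdint.h>
#include <stdio.h>

struct gate_view {
	uint16_t offset_low;
	uint16_t seg;
	uint16_t reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	uint16_t offset_high;
} __attribute__((packed));

static uint32_t gate_offset(const struct gate_view *g)
{
	return (uint32_t)g->offset_low | ((uint32_t)g->offset_high << 16);
}

int main(void)
{
	struct gate_view g = { .offset_low = 0x5678, .offset_high = 0x1234 };

	printf("handler at %#x\n", gate_offset(&g));	/* 0x12345678 */
	return 0;
}
]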
16752diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16753index ced283a..ffe04cc 100644
16754--- a/arch/x86/include/asm/div64.h
16755+++ b/arch/x86/include/asm/div64.h
16756@@ -39,7 +39,7 @@
16757 __mod; \
16758 })
16759
16760-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16761+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16762 {
16763 union {
16764 u64 v64;
16765diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16766index ca3347a..1a5082a 100644
16767--- a/arch/x86/include/asm/elf.h
16768+++ b/arch/x86/include/asm/elf.h
16769@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16770
16771 #include <asm/vdso.h>
16772
16773-#ifdef CONFIG_X86_64
16774-extern unsigned int vdso64_enabled;
16775-#endif
16776 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16777 extern unsigned int vdso32_enabled;
16778 #endif
16779@@ -249,7 +246,25 @@ extern int force_personality32;
16780 the loader. We need to make sure that it is out of the way of the program
16781 that it will "exec", and that there is sufficient room for the brk. */
16782
16783+#ifdef CONFIG_PAX_SEGMEXEC
16784+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16785+#else
16786 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16787+#endif
16788+
16789+#ifdef CONFIG_PAX_ASLR
16790+#ifdef CONFIG_X86_32
16791+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16792+
16793+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16794+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16795+#else
16796+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16797+
16798+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16799+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16800+#endif
16801+#endif
16802
16803 /* This yields a mask that user programs can use to figure out what
16804 instruction set this CPU supports. This could be done in user space,
16805@@ -298,17 +313,13 @@ do { \
16806
16807 #define ARCH_DLINFO \
16808 do { \
16809- if (vdso64_enabled) \
16810- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16811- (unsigned long __force)current->mm->context.vdso); \
16812+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16813 } while (0)
16814
16815 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16816 #define ARCH_DLINFO_X32 \
16817 do { \
16818- if (vdso64_enabled) \
16819- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16820- (unsigned long __force)current->mm->context.vdso); \
16821+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16822 } while (0)
16823
16824 #define AT_SYSINFO 32
16825@@ -323,10 +334,10 @@ else \
16826
16827 #endif /* !CONFIG_X86_32 */
16828
16829-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16830+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16831
16832 #define VDSO_ENTRY \
16833- ((unsigned long)current->mm->context.vdso + \
16834+ (current->mm->context.vdso + \
16835 selected_vdso32->sym___kernel_vsyscall)
16836
16837 struct linux_binprm;
16838@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16839 int uses_interp);
16840 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16841
16842-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16843-#define arch_randomize_brk arch_randomize_brk
16844-
16845 /*
16846 * True on X86_32 or when emulating IA32 on X86_64
16847 */
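[The PAX_DELTA_*_LEN values above are bit counts of page-granular randomization. Assuming they are applied as a random page offset (and the usual TASK_SIZE_MAX_SHIFT of 47 on amd64, giving 47 - 12 - 3 = 32 bits), the resulting ASLR spans work out as follows:

#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;
	const struct { const char *cfg; unsigned bits; } c[] = {
		{ "i386",            16 },
		{ "i386 + SEGMEXEC", 15 },
		{ "amd64 (47-12-3)", 32 },
	};

	for (unsigned i = 0; i < 3; i++)
		printf("%-16s %2u bits -> %llu MiB of mmap randomization\n",
		       c[i].cfg, c[i].bits,
		       (1ULL << c[i].bits << page_shift) >> 20);
	return 0;
}
]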
16848diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16849index 77a99ac..39ff7f5 100644
16850--- a/arch/x86/include/asm/emergency-restart.h
16851+++ b/arch/x86/include/asm/emergency-restart.h
16852@@ -1,6 +1,6 @@
16853 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16854 #define _ASM_X86_EMERGENCY_RESTART_H
16855
16856-extern void machine_emergency_restart(void);
16857+extern void machine_emergency_restart(void) __noreturn;
16858
16859 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16860diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16861index 1c7eefe..d0e4702 100644
16862--- a/arch/x86/include/asm/floppy.h
16863+++ b/arch/x86/include/asm/floppy.h
16864@@ -229,18 +229,18 @@ static struct fd_routine_l {
16865 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16866 } fd_routine[] = {
16867 {
16868- request_dma,
16869- free_dma,
16870- get_dma_residue,
16871- dma_mem_alloc,
16872- hard_dma_setup
16873+ ._request_dma = request_dma,
16874+ ._free_dma = free_dma,
16875+ ._get_dma_residue = get_dma_residue,
16876+ ._dma_mem_alloc = dma_mem_alloc,
16877+ ._dma_setup = hard_dma_setup
16878 },
16879 {
16880- vdma_request_dma,
16881- vdma_nop,
16882- vdma_get_dma_residue,
16883- vdma_mem_alloc,
16884- vdma_dma_setup
16885+ ._request_dma = vdma_request_dma,
16886+ ._free_dma = vdma_nop,
16887+ ._get_dma_residue = vdma_get_dma_residue,
16888+ ._dma_mem_alloc = vdma_mem_alloc,
16889+ ._dma_setup = vdma_dma_setup
16890 }
16891 };
16892
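[The floppy.h hunk only converts positional initializers to designated ones; with five function pointers per fd_routine_l entry, naming each field means a reordered or extended struct can no longer silently bind a callback to the wrong slot. The idiom in miniature:

#include <stdio.h>

struct ops {
	int  (*open)(void);
	void (*close)(void);
};

static int  my_open(void)  { puts("open");  return 0; }
static void my_close(void) { puts("close"); }

static const struct ops fd_ops = {
	.open  = my_open,	/* the field name, not the position, binds */
	.close = my_close,
};

int main(void)
{
	fd_ops.open();
	fd_ops.close();
	return 0;
}
]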
16893diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16894index 72ba21a..79f3f66 100644
16895--- a/arch/x86/include/asm/fpu-internal.h
16896+++ b/arch/x86/include/asm/fpu-internal.h
16897@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16898 #define user_insn(insn, output, input...) \
16899 ({ \
16900 int err; \
16901+ pax_open_userland(); \
16902 asm volatile(ASM_STAC "\n" \
16903- "1:" #insn "\n\t" \
16904+ "1:" \
16905+ __copyuser_seg \
16906+ #insn "\n\t" \
16907 "2: " ASM_CLAC "\n" \
16908 ".section .fixup,\"ax\"\n" \
16909 "3: movl $-1,%[err]\n" \
16910@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16911 _ASM_EXTABLE(1b, 3b) \
16912 : [err] "=r" (err), output \
16913 : "0"(0), input); \
16914+ pax_close_userland(); \
16915 err; \
16916 })
16917
16918@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16919 "fnclex\n\t"
16920 "emms\n\t"
16921 "fildl %P[addr]" /* set F?P to defined value */
16922- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16923+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16924 }
16925
16926 return fpu_restore_checking(&tsk->thread.fpu);
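[user_insn() now brackets its single user-memory instruction with pax_open_userland()/pax_close_userland(), so the UDEREF access window is open only for that one operation. A userspace model of the bracketing discipline, with hypothetical open/close stubs standing in for the real primitives:

#include <stdio.h>

static void open_userland(void)  { puts("window open");  }
static void close_userland(void) { puts("window closed"); }

#define guarded_user_op(expr)			\
({						\
	open_userland();			\
	__typeof__(expr) __r = (expr);		\
	close_userland();			\
	__r;					\
})

int main(void)
{
	int v = guarded_user_op(40 + 2);  /* stands in for the user access */

	printf("result %d\n", v);
	return 0;
}
]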
16927diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16928index b4c1f54..e290c08 100644
16929--- a/arch/x86/include/asm/futex.h
16930+++ b/arch/x86/include/asm/futex.h
16931@@ -12,6 +12,7 @@
16932 #include <asm/smap.h>
16933
16934 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16935+ typecheck(u32 __user *, uaddr); \
16936 asm volatile("\t" ASM_STAC "\n" \
16937 "1:\t" insn "\n" \
16938 "2:\t" ASM_CLAC "\n" \
16939@@ -20,15 +21,16 @@
16940 "\tjmp\t2b\n" \
16941 "\t.previous\n" \
16942 _ASM_EXTABLE(1b, 3b) \
16943- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16944+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16945 : "i" (-EFAULT), "0" (oparg), "1" (0))
16946
16947 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16948+ typecheck(u32 __user *, uaddr); \
16949 asm volatile("\t" ASM_STAC "\n" \
16950 "1:\tmovl %2, %0\n" \
16951 "\tmovl\t%0, %3\n" \
16952 "\t" insn "\n" \
16953- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16954+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16955 "\tjnz\t1b\n" \
16956 "3:\t" ASM_CLAC "\n" \
16957 "\t.section .fixup,\"ax\"\n" \
16958@@ -38,7 +40,7 @@
16959 _ASM_EXTABLE(1b, 4b) \
16960 _ASM_EXTABLE(2b, 4b) \
16961 : "=&a" (oldval), "=&r" (ret), \
16962- "+m" (*uaddr), "=&r" (tem) \
16963+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16964 : "r" (oparg), "i" (-EFAULT), "1" (0))
16965
16966 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16967@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16968
16969 pagefault_disable();
16970
16971+ pax_open_userland();
16972 switch (op) {
16973 case FUTEX_OP_SET:
16974- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16975+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16976 break;
16977 case FUTEX_OP_ADD:
16978- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16979+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16980 uaddr, oparg);
16981 break;
16982 case FUTEX_OP_OR:
16983@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16984 default:
16985 ret = -ENOSYS;
16986 }
16987+ pax_close_userland();
16988
16989 pagefault_enable();
16990
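[The typecheck() lines added to both __futex_atomic_op macros use the stock kernel macro from linux/typecheck.h: comparing the addresses of two dummies of the expected and actual types makes the compiler diagnose any caller that passes something other than a u32 __user *, at zero runtime cost. A standalone copy:

#include <stdint.h>

#define typecheck(type, x)			\
({						\
	type __dummy;				\
	__typeof__(x) __dummy2;			\
	(void)(&__dummy == &__dummy2);		\
	1;					\
})

int main(void)
{
	uint32_t *p = 0;

	(void)typecheck(uint32_t *, p);		/* ok */
	/* (void)typecheck(uint32_t *, (uint64_t *)0);
	 *   -> "comparison of distinct pointer types" */
	return 0;
}
]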
16991diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16992index 9662290..49ca5e5 100644
16993--- a/arch/x86/include/asm/hw_irq.h
16994+++ b/arch/x86/include/asm/hw_irq.h
16995@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
16996 #endif /* CONFIG_X86_LOCAL_APIC */
16997
16998 /* Statistics */
16999-extern atomic_t irq_err_count;
17000-extern atomic_t irq_mis_count;
17001+extern atomic_unchecked_t irq_err_count;
17002+extern atomic_unchecked_t irq_mis_count;
17003
17004 /* EISA */
17005 extern void eisa_set_level_irq(unsigned int irq);
17006diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17007index ccffa53..3c90c87 100644
17008--- a/arch/x86/include/asm/i8259.h
17009+++ b/arch/x86/include/asm/i8259.h
17010@@ -62,7 +62,7 @@ struct legacy_pic {
17011 void (*init)(int auto_eoi);
17012 int (*irq_pending)(unsigned int irq);
17013 void (*make_irq)(unsigned int irq);
17014-};
17015+} __do_const;
17016
17017 extern struct legacy_pic *legacy_pic;
17018 extern struct legacy_pic null_legacy_pic;
17019diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17020index 34a5b93..27e40a6 100644
17021--- a/arch/x86/include/asm/io.h
17022+++ b/arch/x86/include/asm/io.h
17023@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17024 "m" (*(volatile type __force *)addr) barrier); }
17025
17026 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17027-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17028-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17029+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17030+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17031
17032 build_mmio_read(__readb, "b", unsigned char, "=q", )
17033-build_mmio_read(__readw, "w", unsigned short, "=r", )
17034-build_mmio_read(__readl, "l", unsigned int, "=r", )
17035+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17036+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17037
17038 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17039 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17040@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17041 * this function
17042 */
17043
17044-static inline phys_addr_t virt_to_phys(volatile void *address)
17045+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17046 {
17047 return __pa(address);
17048 }
17049@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17050 return ioremap_nocache(offset, size);
17051 }
17052
17053-extern void iounmap(volatile void __iomem *addr);
17054+extern void iounmap(const volatile void __iomem *addr);
17055
17056 extern void set_iounmap_nonlazy(void);
17057
17058@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17059
17060 #include <linux/vmalloc.h>
17061
17062+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17063+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17064+{
17065+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17066+}
17067+
17068+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17069+{
17070+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17071+}
17072+
17073 /*
17074 * Convert a virtual cached pointer to an uncached pointer
17075 */
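[The new valid_phys_addr_range()/valid_mmap_phys_addr_range() reject /dev/mem accesses that reach past the CPU's physical address width. The bound, extracted into a standalone check (phys_bits = 36 is just an example value for boot_cpu_data.x86_phys_bits):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

static int valid_phys_addr_range(uint64_t addr, uint64_t count,
				 unsigned phys_bits)
{
	/* whole [addr, addr+count) span, rounded up to pages,
	 * must fit below 2^phys_bits */
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
	printf("%d\n", valid_phys_addr_range(0xfee00000, 4096, 36)); /* 1 */
	printf("%d\n", valid_phys_addr_range(1ULL << 36, 4096, 36)); /* 0 */
	return 0;
}
]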
17076diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17077index 0a8b519..80e7d5b 100644
17078--- a/arch/x86/include/asm/irqflags.h
17079+++ b/arch/x86/include/asm/irqflags.h
17080@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17081 sti; \
17082 sysexit
17083
17084+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17085+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17086+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17087+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17088+
17089 #else
17090 #define INTERRUPT_RETURN iret
17091 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17092diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17093index 4421b5d..8543006 100644
17094--- a/arch/x86/include/asm/kprobes.h
17095+++ b/arch/x86/include/asm/kprobes.h
17096@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17097 #define RELATIVEJUMP_SIZE 5
17098 #define RELATIVECALL_OPCODE 0xe8
17099 #define RELATIVE_ADDR_SIZE 4
17100-#define MAX_STACK_SIZE 64
17101-#define MIN_STACK_SIZE(ADDR) \
17102- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17103- THREAD_SIZE - (unsigned long)(ADDR))) \
17104- ? (MAX_STACK_SIZE) \
17105- : (((unsigned long)current_thread_info()) + \
17106- THREAD_SIZE - (unsigned long)(ADDR)))
17107+#define MAX_STACK_SIZE 64UL
17108+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17109
17110 #define flush_insn_slot(p) do { } while (0)
17111
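[The rewritten MIN_STACK_SIZE() replaces the open-coded ternary with min(), clamping against current->thread.sp0 (the stack top) instead of recomputing the limit from thread_info. The arithmetic, with an assumed sp0:

#include <stdio.h>

#define MAX_STACK_SIZE 64UL
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long sp0  = 0x10000;	/* stack top (example value) */
	unsigned long addr = 0xffe0;	/* probed stack pointer */
	unsigned long n = min(MAX_STACK_SIZE, sp0 - addr);

	printf("copy %lu bytes\n", n);	/* 32, not the full 64 */
	return 0;
}
]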
17112diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17113index 4ad6560..75c7bdd 100644
17114--- a/arch/x86/include/asm/local.h
17115+++ b/arch/x86/include/asm/local.h
17116@@ -10,33 +10,97 @@ typedef struct {
17117 atomic_long_t a;
17118 } local_t;
17119
17120+typedef struct {
17121+ atomic_long_unchecked_t a;
17122+} local_unchecked_t;
17123+
17124 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17125
17126 #define local_read(l) atomic_long_read(&(l)->a)
17127+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17128 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17129+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17130
17131 static inline void local_inc(local_t *l)
17132 {
17133- asm volatile(_ASM_INC "%0"
17134+ asm volatile(_ASM_INC "%0\n"
17135+
17136+#ifdef CONFIG_PAX_REFCOUNT
17137+ "jno 0f\n"
17138+ _ASM_DEC "%0\n"
17139+ "int $4\n0:\n"
17140+ _ASM_EXTABLE(0b, 0b)
17141+#endif
17142+
17143+ : "+m" (l->a.counter));
17144+}
17145+
17146+static inline void local_inc_unchecked(local_unchecked_t *l)
17147+{
17148+ asm volatile(_ASM_INC "%0\n"
17149 : "+m" (l->a.counter));
17150 }
17151
17152 static inline void local_dec(local_t *l)
17153 {
17154- asm volatile(_ASM_DEC "%0"
17155+ asm volatile(_ASM_DEC "%0\n"
17156+
17157+#ifdef CONFIG_PAX_REFCOUNT
17158+ "jno 0f\n"
17159+ _ASM_INC "%0\n"
17160+ "int $4\n0:\n"
17161+ _ASM_EXTABLE(0b, 0b)
17162+#endif
17163+
17164+ : "+m" (l->a.counter));
17165+}
17166+
17167+static inline void local_dec_unchecked(local_unchecked_t *l)
17168+{
17169+ asm volatile(_ASM_DEC "%0\n"
17170 : "+m" (l->a.counter));
17171 }
17172
17173 static inline void local_add(long i, local_t *l)
17174 {
17175- asm volatile(_ASM_ADD "%1,%0"
17176+ asm volatile(_ASM_ADD "%1,%0\n"
17177+
17178+#ifdef CONFIG_PAX_REFCOUNT
17179+ "jno 0f\n"
17180+ _ASM_SUB "%1,%0\n"
17181+ "int $4\n0:\n"
17182+ _ASM_EXTABLE(0b, 0b)
17183+#endif
17184+
17185+ : "+m" (l->a.counter)
17186+ : "ir" (i));
17187+}
17188+
17189+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17190+{
17191+ asm volatile(_ASM_ADD "%1,%0\n"
17192 : "+m" (l->a.counter)
17193 : "ir" (i));
17194 }
17195
17196 static inline void local_sub(long i, local_t *l)
17197 {
17198- asm volatile(_ASM_SUB "%1,%0"
17199+ asm volatile(_ASM_SUB "%1,%0\n"
17200+
17201+#ifdef CONFIG_PAX_REFCOUNT
17202+ "jno 0f\n"
17203+ _ASM_ADD "%1,%0\n"
17204+ "int $4\n0:\n"
17205+ _ASM_EXTABLE(0b, 0b)
17206+#endif
17207+
17208+ : "+m" (l->a.counter)
17209+ : "ir" (i));
17210+}
17211+
17212+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17213+{
17214+ asm volatile(_ASM_SUB "%1,%0\n"
17215 : "+m" (l->a.counter)
17216 : "ir" (i));
17217 }
17218@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17219 */
17220 static inline int local_sub_and_test(long i, local_t *l)
17221 {
17222- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17223+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17224 }
17225
17226 /**
17227@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17228 */
17229 static inline int local_dec_and_test(local_t *l)
17230 {
17231- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17232+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17233 }
17234
17235 /**
17236@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17237 */
17238 static inline int local_inc_and_test(local_t *l)
17239 {
17240- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17241+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17242 }
17243
17244 /**
17245@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17246 */
17247 static inline int local_add_negative(long i, local_t *l)
17248 {
17249- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17250+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17251 }
17252
17253 /**
17254@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17255 static inline long local_add_return(long i, local_t *l)
17256 {
17257 long __i = i;
17258+ asm volatile(_ASM_XADD "%0, %1\n"
17259+
17260+#ifdef CONFIG_PAX_REFCOUNT
17261+ "jno 0f\n"
17262+ _ASM_MOV "%0,%1\n"
17263+ "int $4\n0:\n"
17264+ _ASM_EXTABLE(0b, 0b)
17265+#endif
17266+
17267+ : "+r" (i), "+m" (l->a.counter)
17268+ : : "memory");
17269+ return i + __i;
17270+}
17271+
17272+/**
17273+ * local_add_return_unchecked - add and return
17274+ * @i: integer value to add
17275+ * @l: pointer to type local_unchecked_t
17276+ *
17277+ * Atomically adds @i to @l and returns @i + @l
17278+ */
17279+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17280+{
17281+ long __i = i;
17282 asm volatile(_ASM_XADD "%0, %1;"
17283 : "+r" (i), "+m" (l->a.counter)
17284 : : "memory");
17285@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17286
17287 #define local_cmpxchg(l, o, n) \
17288 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17289+#define local_cmpxchg_unchecked(l, o, n) \
17290+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17291 /* Always has a lock prefix */
17292 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17293
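[Every PAX_REFCOUNT hunk above follows one pattern: perform the operation, "jno" past the fixup when no signed overflow occurred, otherwise undo the operation and raise #OF with int $4 (recovered via the exception table). A userspace model of the same semantics, using __builtin_add_overflow in place of the flags check:

#include <limits.h>
#include <stdio.h>

/* Detect signed overflow on increment; leave the counter saturated
 * instead of wrapped and report it -- the in-kernel version traps. */
static int local_inc_checked(int *c)
{
	int next;

	if (__builtin_add_overflow(*c, 1, &next))
		return -1;		/* kernel: undo + int $4 */
	*c = next;
	return 0;
}

int main(void)
{
	int c = INT_MAX;

	if (local_inc_checked(&c))
		printf("overflow caught, counter stays at %d\n", c);
	return 0;
}
]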
17294diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17295new file mode 100644
17296index 0000000..2bfd3ba
17297--- /dev/null
17298+++ b/arch/x86/include/asm/mman.h
17299@@ -0,0 +1,15 @@
17300+#ifndef _X86_MMAN_H
17301+#define _X86_MMAN_H
17302+
17303+#include <uapi/asm/mman.h>
17304+
17305+#ifdef __KERNEL__
17306+#ifndef __ASSEMBLY__
17307+#ifdef CONFIG_X86_32
17308+#define arch_mmap_check i386_mmap_check
17309+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17310+#endif
17311+#endif
17312+#endif
17313+
17314+#endif /* X86_MMAN_H */
17315diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17316index 09b9620..923aecd 100644
17317--- a/arch/x86/include/asm/mmu.h
17318+++ b/arch/x86/include/asm/mmu.h
17319@@ -9,7 +9,7 @@
17320 * we put the segment information here.
17321 */
17322 typedef struct {
17323- void *ldt;
17324+ struct desc_struct *ldt;
17325 int size;
17326
17327 #ifdef CONFIG_X86_64
17328@@ -18,7 +18,19 @@ typedef struct {
17329 #endif
17330
17331 struct mutex lock;
17332- void __user *vdso;
17333+ unsigned long vdso;
17334+
17335+#ifdef CONFIG_X86_32
17336+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17337+ unsigned long user_cs_base;
17338+ unsigned long user_cs_limit;
17339+
17340+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17341+ cpumask_t cpu_user_cs_mask;
17342+#endif
17343+
17344+#endif
17345+#endif
17346
17347 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
17348 } mm_context_t;
17349diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17350index 883f6b93..6869d96 100644
17351--- a/arch/x86/include/asm/mmu_context.h
17352+++ b/arch/x86/include/asm/mmu_context.h
17353@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
17354
17355 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17356 {
17357+
17358+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17359+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17360+ unsigned int i;
17361+ pgd_t *pgd;
17362+
17363+ pax_open_kernel();
17364+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17365+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17366+ set_pgd_batched(pgd+i, native_make_pgd(0));
17367+ pax_close_kernel();
17368+ }
17369+#endif
17370+
17371 #ifdef CONFIG_SMP
17372 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17373 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17374@@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17375 struct task_struct *tsk)
17376 {
17377 unsigned cpu = smp_processor_id();
17378+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17379+ int tlbstate = TLBSTATE_OK;
17380+#endif
17381
17382 if (likely(prev != next)) {
17383 #ifdef CONFIG_SMP
17384+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17385+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17386+#endif
17387 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17388 this_cpu_write(cpu_tlbstate.active_mm, next);
17389 #endif
17390 cpumask_set_cpu(cpu, mm_cpumask(next));
17391
17392 /* Re-load page tables */
17393+#ifdef CONFIG_PAX_PER_CPU_PGD
17394+ pax_open_kernel();
17395+
17396+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17397+ if (static_cpu_has(X86_FEATURE_PCID))
17398+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17399+ else
17400+#endif
17401+
17402+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17403+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17404+ pax_close_kernel();
17405+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17406+
17407+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17408+ if (static_cpu_has(X86_FEATURE_PCID)) {
17409+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17410+ u64 descriptor[2];
17411+ descriptor[0] = PCID_USER;
17412+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17413+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17414+ descriptor[0] = PCID_KERNEL;
17415+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17416+ }
17417+ } else {
17418+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17419+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17420+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17421+ else
17422+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17423+ }
17424+ } else
17425+#endif
17426+
17427+ load_cr3(get_cpu_pgd(cpu, kernel));
17428+#else
17429 load_cr3(next->pgd);
17430+#endif
17431 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17432
17433 /* Stop flush ipis for the previous mm */
17434@@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17435 */
17436 if (unlikely(prev->context.ldt != next->context.ldt))
17437 load_LDT_nolock(&next->context);
17438+
17439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17440+ if (!(__supported_pte_mask & _PAGE_NX)) {
17441+ smp_mb__before_atomic();
17442+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17443+ smp_mb__after_atomic();
17444+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17445+ }
17446+#endif
17447+
17448+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17449+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17450+ prev->context.user_cs_limit != next->context.user_cs_limit))
17451+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17452+#ifdef CONFIG_SMP
17453+ else if (unlikely(tlbstate != TLBSTATE_OK))
17454+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17455+#endif
17456+#endif
17457+
17458 }
17459+ else {
17460+
17461+#ifdef CONFIG_PAX_PER_CPU_PGD
17462+ pax_open_kernel();
17463+
17464+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17465+ if (static_cpu_has(X86_FEATURE_PCID))
17466+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17467+ else
17468+#endif
17469+
17470+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17471+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17472+ pax_close_kernel();
17473+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17474+
17475+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17476+ if (static_cpu_has(X86_FEATURE_PCID)) {
17477+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17478+ u64 descriptor[2];
17479+ descriptor[0] = PCID_USER;
17480+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17481+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17482+ descriptor[0] = PCID_KERNEL;
17483+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17484+ }
17485+ } else {
17486+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17487+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17488+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17489+ else
17490+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17491+ }
17492+ } else
17493+#endif
17494+
17495+ load_cr3(get_cpu_pgd(cpu, kernel));
17496+#endif
17497+
17498 #ifdef CONFIG_SMP
17499- else {
17500 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17501 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17502
17503@@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17504 * tlb flush IPI delivery. We must reload CR3
17505 * to make sure to use no freed page tables.
17506 */
17507+
17508+#ifndef CONFIG_PAX_PER_CPU_PGD
17509 load_cr3(next->pgd);
17510 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17511+#endif
17512+
17513 load_mm_cr4(next);
17514 load_LDT_nolock(&next->context);
17515+
17516+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17517+ if (!(__supported_pte_mask & _PAGE_NX))
17518+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17519+#endif
17520+
17521+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17522+#ifdef CONFIG_PAX_PAGEEXEC
17523+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17524+#endif
17525+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17526+#endif
17527+
17528 }
17529+#endif
17530 }
17531-#endif
17532 }
17533
17534 #define activate_mm(prev, next) \
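[Both switch_mm() branches above end in the same CR3 dance: invalidate or reload the user PCID, then install the kernel PGD, using the no-flush bit when STRONGUDEREF makes the flush unnecessary. A model of the CR3 values being composed (bit 63 is the architectural no-flush bit; the PCID_KERNEL/PCID_USER values are assumptions standing in for the patch's constants):

#include <stdint.h>
#include <stdio.h>

#define PCID_KERNEL	0x0ULL		/* assumed */
#define PCID_USER	0x1ULL		/* assumed */
#define CR3_NOFLUSH	(1ULL << 63)	/* architectural */

static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
{
	return pgd_pa | pcid | (noflush ? CR3_NOFLUSH : 0);
}

int main(void)
{
	uint64_t pgd = 0x1234000;	/* page-aligned PGD phys addr */

	/* user PGD first (flushes the user PCID) ... */
	printf("%#llx\n", (unsigned long long)make_cr3(pgd, PCID_USER, 0));
	/* ... then the kernel PGD, without flushing its PCID */
	printf("%#llx\n", (unsigned long long)make_cr3(pgd, PCID_KERNEL, 1));
	return 0;
}
]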
17535diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17536index e3b7819..b257c64 100644
17537--- a/arch/x86/include/asm/module.h
17538+++ b/arch/x86/include/asm/module.h
17539@@ -5,6 +5,7 @@
17540
17541 #ifdef CONFIG_X86_64
17542 /* X86_64 does not define MODULE_PROC_FAMILY */
17543+#define MODULE_PROC_FAMILY ""
17544 #elif defined CONFIG_M486
17545 #define MODULE_PROC_FAMILY "486 "
17546 #elif defined CONFIG_M586
17547@@ -57,8 +58,20 @@
17548 #error unknown processor family
17549 #endif
17550
17551-#ifdef CONFIG_X86_32
17552-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17553+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17554+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17555+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17556+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17557+#else
17558+#define MODULE_PAX_KERNEXEC ""
17559 #endif
17560
17561+#ifdef CONFIG_PAX_MEMORY_UDEREF
17562+#define MODULE_PAX_UDEREF "UDEREF "
17563+#else
17564+#define MODULE_PAX_UDEREF ""
17565+#endif
17566+
17567+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17568+
17569 #endif /* _ASM_X86_MODULE_H */
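[MODULE_ARCH_VERMAGIC is now built from adjacent string literals, so the KERNEXEC method and UDEREF state become part of every module's version magic and a module built against a differently configured kernel is refused at load time. What the concatenation yields for one possible configuration:

#include <stdio.h>

#define MODULE_PROC_FAMILY	""		/* x86_64 */
#define MODULE_PAX_KERNEXEC	"KERNEXEC_BTS "	/* example config */
#define MODULE_PAX_UDEREF	"UDEREF "

#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	/* adjacent literals concatenate at compile time */
	puts("vermagic suffix: \"" MODULE_ARCH_VERMAGIC "\"");
	return 0;
}
]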
17570diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17571index 5f2fc44..106caa6 100644
17572--- a/arch/x86/include/asm/nmi.h
17573+++ b/arch/x86/include/asm/nmi.h
17574@@ -36,26 +36,35 @@ enum {
17575
17576 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17577
17578+struct nmiaction;
17579+
17580+struct nmiwork {
17581+ const struct nmiaction *action;
17582+ u64 max_duration;
17583+ struct irq_work irq_work;
17584+};
17585+
17586 struct nmiaction {
17587 struct list_head list;
17588 nmi_handler_t handler;
17589- u64 max_duration;
17590- struct irq_work irq_work;
17591 unsigned long flags;
17592 const char *name;
17593-};
17594+ struct nmiwork *work;
17595+} __do_const;
17596
17597 #define register_nmi_handler(t, fn, fg, n, init...) \
17598 ({ \
17599- static struct nmiaction init fn##_na = { \
17600+ static struct nmiwork fn##_nw; \
17601+ static const struct nmiaction init fn##_na = { \
17602 .handler = (fn), \
17603 .name = (n), \
17604 .flags = (fg), \
17605+ .work = &fn##_nw, \
17606 }; \
17607 __register_nmi_handler((t), &fn##_na); \
17608 })
17609
17610-int __register_nmi_handler(unsigned int, struct nmiaction *);
17611+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17612
17613 void unregister_nmi_handler(unsigned int, const char *);
17614
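[Splitting nmiaction is a recurring grsecurity move: the fields a handler mutates at runtime (max_duration, irq_work) migrate into a separate nmiwork so the registered descriptor itself can be const and live in read-only memory. The same pattern in miniature:

#include <stdio.h>

struct handler_work {
	unsigned long long max_duration;
};

struct handler {
	int (*fn)(void);
	const char *name;
	struct handler_work *work;	/* mutable state lives outside */
};

static int ping(void) { return 0; }

static struct handler_work ping_work;
static const struct handler ping_handler = {
	.fn = ping, .name = "ping", .work = &ping_work,
};

int main(void)
{
	ping_handler.fn();
	ping_handler.work->max_duration = 42;	/* only *work is written */
	printf("%s: %llu\n", ping_handler.name,
	       ping_handler.work->max_duration);
	return 0;
}
]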
17615diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17616index 802dde3..9183e68 100644
17617--- a/arch/x86/include/asm/page.h
17618+++ b/arch/x86/include/asm/page.h
17619@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17620 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17621
17622 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17623+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17624
17625 #define __boot_va(x) __va(x)
17626 #define __boot_pa(x) __pa(x)
17627@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17628 * virt_to_page(kaddr) returns a valid pointer if and only if
17629 * virt_addr_valid(kaddr) returns true.
17630 */
17631-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17632 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17633 extern bool __virt_addr_valid(unsigned long kaddr);
17634 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17635
17636+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17637+#define virt_to_page(kaddr) \
17638+ ({ \
17639+ const void *__kaddr = (const void *)(kaddr); \
17640+ BUG_ON(!virt_addr_valid(__kaddr)); \
17641+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17642+ })
17643+#else
17644+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17645+#endif
17646+
17647 #endif /* __ASSEMBLY__ */
17648
17649 #include <asm-generic/memory_model.h>
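[Under KSTACKOVERFLOW, virt_to_page() becomes a statement-expression macro that evaluates its argument once and BUG()s on an invalid kernel address before translating it. The shape of that wrapper, with assert() and stand-ins for virt_addr_valid() and the pfn lookup:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int addr_valid(const void *p) { return p != NULL; }	/* stand-in */

static unsigned long to_pfn(const void *p)
{
	return (unsigned long)(uintptr_t)p >> 12;
}

#define virt_to_pfn_checked(kaddr)			\
({							\
	const void *__kaddr = (const void *)(kaddr);	\
	assert(addr_valid(__kaddr));			\
	to_pfn(__kaddr);				\
})

int main(void)
{
	char buf[1];

	printf("pfn %lu\n", virt_to_pfn_checked(buf));
	return 0;
}
]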
17650diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17651index b3bebf9..13ac22e 100644
17652--- a/arch/x86/include/asm/page_64.h
17653+++ b/arch/x86/include/asm/page_64.h
17654@@ -7,9 +7,9 @@
17655
17656 /* duplicated to the one in bootmem.h */
17657 extern unsigned long max_pfn;
17658-extern unsigned long phys_base;
17659+extern const unsigned long phys_base;
17660
17661-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17662+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17663 {
17664 unsigned long y = x - __START_KERNEL_map;
17665
17666@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17667 }
17668
17669 #ifdef CONFIG_DEBUG_VIRTUAL
17670-extern unsigned long __phys_addr(unsigned long);
17671-extern unsigned long __phys_addr_symbol(unsigned long);
17672+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17673+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17674 #else
17675 #define __phys_addr(x) __phys_addr_nodebug(x)
17676 #define __phys_addr_symbol(x) \
17677diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17678index 965c47d..ffe0af8 100644
17679--- a/arch/x86/include/asm/paravirt.h
17680+++ b/arch/x86/include/asm/paravirt.h
17681@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17682 return (pmd_t) { ret };
17683 }
17684
17685-static inline pmdval_t pmd_val(pmd_t pmd)
17686+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17687 {
17688 pmdval_t ret;
17689
17690@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17691 val);
17692 }
17693
17694+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17695+{
17696+ pgdval_t val = native_pgd_val(pgd);
17697+
17698+ if (sizeof(pgdval_t) > sizeof(long))
17699+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17700+ val, (u64)val >> 32);
17701+ else
17702+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17703+ val);
17704+}
17705+
17706 static inline void pgd_clear(pgd_t *pgdp)
17707 {
17708 set_pgd(pgdp, __pgd(0));
17709@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17710 pv_mmu_ops.set_fixmap(idx, phys, flags);
17711 }
17712
17713+#ifdef CONFIG_PAX_KERNEXEC
17714+static inline unsigned long pax_open_kernel(void)
17715+{
17716+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17717+}
17718+
17719+static inline unsigned long pax_close_kernel(void)
17720+{
17721+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17722+}
17723+#else
17724+static inline unsigned long pax_open_kernel(void) { return 0; }
17725+static inline unsigned long pax_close_kernel(void) { return 0; }
17726+#endif
17727+
17728 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17729
17730 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17731@@ -906,7 +933,7 @@ extern void default_banner(void);
17732
17733 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17734 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17735-#define PARA_INDIRECT(addr) *%cs:addr
17736+#define PARA_INDIRECT(addr) *%ss:addr
17737 #endif
17738
17739 #define INTERRUPT_RETURN \
17740@@ -981,6 +1008,21 @@ extern void default_banner(void);
17741 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17742 CLBR_NONE, \
17743 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17744+
17745+#define GET_CR0_INTO_RDI \
17746+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17747+ mov %rax,%rdi
17748+
17749+#define SET_RDI_INTO_CR0 \
17750+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17751+
17752+#define GET_CR3_INTO_RDI \
17753+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17754+ mov %rax,%rdi
17755+
17756+#define SET_RDI_INTO_CR3 \
17757+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17758+
17759 #endif /* CONFIG_X86_32 */
17760
17761 #endif /* __ASSEMBLY__ */
17762diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17763index 7549b8b..f0edfda 100644
17764--- a/arch/x86/include/asm/paravirt_types.h
17765+++ b/arch/x86/include/asm/paravirt_types.h
17766@@ -84,7 +84,7 @@ struct pv_init_ops {
17767 */
17768 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17769 unsigned long addr, unsigned len);
17770-};
17771+} __no_const __no_randomize_layout;
17772
17773
17774 struct pv_lazy_ops {
17775@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17776 void (*enter)(void);
17777 void (*leave)(void);
17778 void (*flush)(void);
17779-};
17780+} __no_randomize_layout;
17781
17782 struct pv_time_ops {
17783 unsigned long long (*sched_clock)(void);
17784 unsigned long long (*steal_clock)(int cpu);
17785 unsigned long (*get_tsc_khz)(void);
17786-};
17787+} __no_const __no_randomize_layout;
17788
17789 struct pv_cpu_ops {
17790 /* hooks for various privileged instructions */
17791@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17792
17793 void (*start_context_switch)(struct task_struct *prev);
17794 void (*end_context_switch)(struct task_struct *next);
17795-};
17796+} __no_const __no_randomize_layout;
17797
17798 struct pv_irq_ops {
17799 /*
17800@@ -215,7 +215,7 @@ struct pv_irq_ops {
17801 #ifdef CONFIG_X86_64
17802 void (*adjust_exception_frame)(void);
17803 #endif
17804-};
17805+} __no_randomize_layout;
17806
17807 struct pv_apic_ops {
17808 #ifdef CONFIG_X86_LOCAL_APIC
17809@@ -223,7 +223,7 @@ struct pv_apic_ops {
17810 unsigned long start_eip,
17811 unsigned long start_esp);
17812 #endif
17813-};
17814+} __no_const __no_randomize_layout;
17815
17816 struct pv_mmu_ops {
17817 unsigned long (*read_cr2)(void);
17818@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17819 struct paravirt_callee_save make_pud;
17820
17821 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17822+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17823 #endif /* PAGETABLE_LEVELS == 4 */
17824 #endif /* PAGETABLE_LEVELS >= 3 */
17825
17826@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17827 an mfn. We can tell which is which from the index. */
17828 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17829 phys_addr_t phys, pgprot_t flags);
17830-};
17831+
17832+#ifdef CONFIG_PAX_KERNEXEC
17833+ unsigned long (*pax_open_kernel)(void);
17834+ unsigned long (*pax_close_kernel)(void);
17835+#endif
17836+
17837+} __no_randomize_layout;
17838
17839 struct arch_spinlock;
17840 #ifdef CONFIG_SMP
17841@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17842 struct pv_lock_ops {
17843 struct paravirt_callee_save lock_spinning;
17844 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17845-};
17846+} __no_randomize_layout;
17847
17848 /* This contains all the paravirt structures: we get a convenient
17849 * number for each function using the offset which we use to indicate
17850- * what to patch. */
17851+ * what to patch.
17852+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17853+ */
17854+
17855 struct paravirt_patch_template {
17856 struct pv_init_ops pv_init_ops;
17857 struct pv_time_ops pv_time_ops;
17858@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17859 struct pv_apic_ops pv_apic_ops;
17860 struct pv_mmu_ops pv_mmu_ops;
17861 struct pv_lock_ops pv_lock_ops;
17862-};
17863+} __no_randomize_layout;
17864
17865 extern struct pv_info pv_info;
17866 extern struct pv_init_ops pv_init_ops;
17867diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17868index c4412e9..90e88c5 100644
17869--- a/arch/x86/include/asm/pgalloc.h
17870+++ b/arch/x86/include/asm/pgalloc.h
17871@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17872 pmd_t *pmd, pte_t *pte)
17873 {
17874 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17875+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17876+}
17877+
17878+static inline void pmd_populate_user(struct mm_struct *mm,
17879+ pmd_t *pmd, pte_t *pte)
17880+{
17881+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17882 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17883 }
17884
17885@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17886
17887 #ifdef CONFIG_X86_PAE
17888 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17889+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17890+{
17891+ pud_populate(mm, pudp, pmd);
17892+}
17893 #else /* !CONFIG_X86_PAE */
17894 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17895 {
17896 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17897 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17898 }
17899+
17900+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17901+{
17902+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17903+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17904+}
17905 #endif /* CONFIG_X86_PAE */
17906
17907 #if PAGETABLE_LEVELS > 3
17908@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17909 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17910 }
17911
17912+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17913+{
17914+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17915+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17916+}
17917+
17918 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17919 {
17920 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
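[The *_populate_kernel() variants introduced here differ from the stock ones in exactly one bit: _KERNPG_TABLE is _PAGE_TABLE without _PAGE_USER, so upper-level entries for kernel mappings are never walkable from user mode. The flag arithmetic, using the standard x86 PTE bit values:

#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040

#define _KERNPG_TABLE \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
	printf("_PAGE_TABLE   = %#x\n", _PAGE_TABLE);	/* 0x67 */
	printf("_KERNPG_TABLE = %#x\n", _KERNPG_TABLE);	/* 0x63 */
	return 0;
}
]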
17921diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17922index fd74a11..35fd5af 100644
17923--- a/arch/x86/include/asm/pgtable-2level.h
17924+++ b/arch/x86/include/asm/pgtable-2level.h
17925@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17926
17927 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17928 {
17929+ pax_open_kernel();
17930 *pmdp = pmd;
17931+ pax_close_kernel();
17932 }
17933
17934 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17935diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17936index cdaa58c..e61122b 100644
17937--- a/arch/x86/include/asm/pgtable-3level.h
17938+++ b/arch/x86/include/asm/pgtable-3level.h
17939@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17940
17941 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17942 {
17943+ pax_open_kernel();
17944 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17945+ pax_close_kernel();
17946 }
17947
17948 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17949 {
17950+ pax_open_kernel();
17951 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17952+ pax_close_kernel();
17953 }
17954
17955 /*
17956diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17957index a0c35bf..7045c6a 100644
17958--- a/arch/x86/include/asm/pgtable.h
17959+++ b/arch/x86/include/asm/pgtable.h
17960@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17961
17962 #ifndef __PAGETABLE_PUD_FOLDED
17963 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17964+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17965 #define pgd_clear(pgd) native_pgd_clear(pgd)
17966 #endif
17967
17968@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17969
17970 #define arch_end_context_switch(prev) do {} while(0)
17971
17972+#define pax_open_kernel() native_pax_open_kernel()
17973+#define pax_close_kernel() native_pax_close_kernel()
17974 #endif /* CONFIG_PARAVIRT */
17975
17976+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17977+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17978+
17979+#ifdef CONFIG_PAX_KERNEXEC
17980+static inline unsigned long native_pax_open_kernel(void)
17981+{
17982+ unsigned long cr0;
17983+
17984+ preempt_disable();
17985+ barrier();
17986+ cr0 = read_cr0() ^ X86_CR0_WP;
17987+ BUG_ON(cr0 & X86_CR0_WP);
17988+ write_cr0(cr0);
17989+ barrier();
17990+ return cr0 ^ X86_CR0_WP;
17991+}
17992+
17993+static inline unsigned long native_pax_close_kernel(void)
17994+{
17995+ unsigned long cr0;
17996+
17997+ barrier();
17998+ cr0 = read_cr0() ^ X86_CR0_WP;
17999+ BUG_ON(!(cr0 & X86_CR0_WP));
18000+ write_cr0(cr0);
18001+ barrier();
18002+ preempt_enable_no_resched();
18003+ return cr0 ^ X86_CR0_WP;
18004+}
18005+#else
18006+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18007+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18008+#endif
18009+
18010 /*
18011 * The following only work if pte_present() is true.
18012 * Undefined behaviour if not..
18013 */
18014+static inline int pte_user(pte_t pte)
18015+{
18016+ return pte_val(pte) & _PAGE_USER;
18017+}
18018+
18019 static inline int pte_dirty(pte_t pte)
18020 {
18021 return pte_flags(pte) & _PAGE_DIRTY;
18022@@ -150,6 +192,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18023 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18024 }
18025
18026+static inline unsigned long pgd_pfn(pgd_t pgd)
18027+{
18028+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18029+}
18030+
18031 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18032
18033 static inline int pmd_large(pmd_t pte)
18034@@ -203,9 +250,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18035 return pte_clear_flags(pte, _PAGE_RW);
18036 }
18037
18038+static inline pte_t pte_mkread(pte_t pte)
18039+{
18040+ return __pte(pte_val(pte) | _PAGE_USER);
18041+}
18042+
18043 static inline pte_t pte_mkexec(pte_t pte)
18044 {
18045- return pte_clear_flags(pte, _PAGE_NX);
18046+#ifdef CONFIG_X86_PAE
18047+ if (__supported_pte_mask & _PAGE_NX)
18048+ return pte_clear_flags(pte, _PAGE_NX);
18049+ else
18050+#endif
18051+ return pte_set_flags(pte, _PAGE_USER);
18052+}
18053+
18054+static inline pte_t pte_exprotect(pte_t pte)
18055+{
18056+#ifdef CONFIG_X86_PAE
18057+ if (__supported_pte_mask & _PAGE_NX)
18058+ return pte_set_flags(pte, _PAGE_NX);
18059+ else
18060+#endif
18061+ return pte_clear_flags(pte, _PAGE_USER);
18062 }
18063
18064 static inline pte_t pte_mkdirty(pte_t pte)
18065@@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18066 #endif
18067
18068 #ifndef __ASSEMBLY__
18069+
18070+#ifdef CONFIG_PAX_PER_CPU_PGD
18071+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18072+enum cpu_pgd_type {kernel = 0, user = 1};
18073+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18074+{
18075+ return cpu_pgd[cpu][type];
18076+}
18077+#endif
18078+
18079 #include <linux/mm_types.h>
18080 #include <linux/mmdebug.h>
18081 #include <linux/log2.h>
18082@@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18083 * Currently stuck as a macro due to indirect forward reference to
18084 * linux/mmzone.h's __section_mem_map_addr() definition:
18085 */
18086-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18087+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18088
18089 /* Find an entry in the second-level page table.. */
18090 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18091@@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18092 * Currently stuck as a macro due to indirect forward reference to
18093 * linux/mmzone.h's __section_mem_map_addr() definition:
18094 */
18095-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18096+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18097
18098 /* to find an entry in a page-table-directory. */
18099 static inline unsigned long pud_index(unsigned long address)
18100@@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18101
18102 static inline int pgd_bad(pgd_t pgd)
18103 {
18104- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18105+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18106 }
18107
18108 static inline int pgd_none(pgd_t pgd)
18109@@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
18110 * pgd_offset() returns a (pgd_t *)
18111 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18112 */
18113-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18114+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18115+
18116+#ifdef CONFIG_PAX_PER_CPU_PGD
18117+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18118+#endif
18119+
18120 /*
18121 * a shortcut which implies the use of the kernel's pgd, instead
18122 * of a process's
18123@@ -660,6 +742,25 @@ static inline int pgd_none(pgd_t pgd)
18124 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18125 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18126
18127+#ifdef CONFIG_X86_32
18128+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18129+#else
18130+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18131+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18132+
18133+#ifdef CONFIG_PAX_MEMORY_UDEREF
18134+#ifdef __ASSEMBLY__
18135+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18136+#else
18137+extern unsigned long pax_user_shadow_base;
18138+extern pgdval_t clone_pgd_mask;
18139+#endif
18140+#else
18141+#define pax_user_shadow_base (0UL)
18142+#endif
18143+
18144+#endif
18145+
18146 #ifndef __ASSEMBLY__
18147
18148 extern int direct_gbpages;
18149@@ -826,11 +927,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18150 * dst and src can be on the same page, but the range must not overlap,
18151 * and must not cross a page boundary.
18152 */
18153-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18154+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18155 {
18156- memcpy(dst, src, count * sizeof(pgd_t));
18157+ pax_open_kernel();
18158+ while (count--)
18159+ *dst++ = *src++;
18160+ pax_close_kernel();
18161 }
18162
18163+#ifdef CONFIG_PAX_PER_CPU_PGD
18164+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18165+#endif
18166+
18167+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18168+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18169+#else
18170+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18171+#endif
18172+
18173 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18174 static inline int page_level_shift(enum pg_level level)
18175 {
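[native_pax_open_kernel()/native_pax_close_kernel() above implement KERNEXEC's write window by toggling CR0.WP with preemption disabled; every pax_open_kernel()/pax_close_kernel() pair sprinkled through this patch is a client of it. CR0 cannot be touched from user mode, but the same open-write-close discipline can be modelled with mprotect():

#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(p != MAP_FAILED);
	strcpy(p, "read-mostly data");
	assert(mprotect(p, pagesz, PROT_READ) == 0);	/* normally RO */

	/* pax_open_kernel() analog: lift protection for one write */
	assert(mprotect(p, pagesz, PROT_READ | PROT_WRITE) == 0);
	p[0] = 'R';
	/* pax_close_kernel() analog: restore protection */
	assert(mprotect(p, pagesz, PROT_READ) == 0);
	return 0;
}
]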
18176diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18177index b6c0b40..3535d47 100644
18178--- a/arch/x86/include/asm/pgtable_32.h
18179+++ b/arch/x86/include/asm/pgtable_32.h
18180@@ -25,9 +25,6 @@
18181 struct mm_struct;
18182 struct vm_area_struct;
18183
18184-extern pgd_t swapper_pg_dir[1024];
18185-extern pgd_t initial_page_table[1024];
18186-
18187 static inline void pgtable_cache_init(void) { }
18188 static inline void check_pgt_cache(void) { }
18189 void paging_init(void);
18190@@ -45,6 +42,12 @@ void paging_init(void);
18191 # include <asm/pgtable-2level.h>
18192 #endif
18193
18194+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18195+extern pgd_t initial_page_table[PTRS_PER_PGD];
18196+#ifdef CONFIG_X86_PAE
18197+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18198+#endif
18199+
18200 #if defined(CONFIG_HIGHPTE)
18201 #define pte_offset_map(dir, address) \
18202 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18203@@ -59,12 +62,17 @@ void paging_init(void);
18204 /* Clear a kernel PTE and flush it from the TLB */
18205 #define kpte_clear_flush(ptep, vaddr) \
18206 do { \
18207+ pax_open_kernel(); \
18208 pte_clear(&init_mm, (vaddr), (ptep)); \
18209+ pax_close_kernel(); \
18210 __flush_tlb_one((vaddr)); \
18211 } while (0)
18212
18213 #endif /* !__ASSEMBLY__ */
18214
18215+#define HAVE_ARCH_UNMAPPED_AREA
18216+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18217+
18218 /*
18219 * kern_addr_valid() is (1) for FLATMEM and (0) for
18220 * SPARSEMEM and DISCONTIGMEM
18221diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18222index 9fb2f2b..b04b4bf 100644
18223--- a/arch/x86/include/asm/pgtable_32_types.h
18224+++ b/arch/x86/include/asm/pgtable_32_types.h
18225@@ -8,7 +8,7 @@
18226 */
18227 #ifdef CONFIG_X86_PAE
18228 # include <asm/pgtable-3level_types.h>
18229-# define PMD_SIZE (1UL << PMD_SHIFT)
18230+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18231 # define PMD_MASK (~(PMD_SIZE - 1))
18232 #else
18233 # include <asm/pgtable-2level_types.h>
18234@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18235 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18236 #endif
18237
18238+#ifdef CONFIG_PAX_KERNEXEC
18239+#ifndef __ASSEMBLY__
18240+extern unsigned char MODULES_EXEC_VADDR[];
18241+extern unsigned char MODULES_EXEC_END[];
18242+#endif
18243+#include <asm/boot.h>
18244+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18245+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18246+#else
18247+#define ktla_ktva(addr) (addr)
18248+#define ktva_ktla(addr) (addr)
18249+#endif
18250+
18251 #define MODULES_VADDR VMALLOC_START
18252 #define MODULES_END VMALLOC_END
18253 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18254diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18255index 2ee7811..55aca24 100644
18256--- a/arch/x86/include/asm/pgtable_64.h
18257+++ b/arch/x86/include/asm/pgtable_64.h
18258@@ -16,11 +16,16 @@
18259
18260 extern pud_t level3_kernel_pgt[512];
18261 extern pud_t level3_ident_pgt[512];
18262+extern pud_t level3_vmalloc_start_pgt[512];
18263+extern pud_t level3_vmalloc_end_pgt[512];
18264+extern pud_t level3_vmemmap_pgt[512];
18265+extern pud_t level2_vmemmap_pgt[512];
18266 extern pmd_t level2_kernel_pgt[512];
18267 extern pmd_t level2_fixmap_pgt[512];
18268-extern pmd_t level2_ident_pgt[512];
18269-extern pte_t level1_fixmap_pgt[512];
18270-extern pgd_t init_level4_pgt[];
18271+extern pmd_t level2_ident_pgt[2][512];
18272+extern pte_t level1_fixmap_pgt[3][512];
18273+extern pte_t level1_vsyscall_pgt[512];
18274+extern pgd_t init_level4_pgt[512];
18275
18276 #define swapper_pg_dir init_level4_pgt
18277
18278@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18279
18280 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18281 {
18282+ pax_open_kernel();
18283 *pmdp = pmd;
18284+ pax_close_kernel();
18285 }
18286
18287 static inline void native_pmd_clear(pmd_t *pmd)
18288@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18289
18290 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18291 {
18292+ pax_open_kernel();
18293 *pudp = pud;
18294+ pax_close_kernel();
18295 }
18296
18297 static inline void native_pud_clear(pud_t *pud)
18298@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18299
18300 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18301 {
18302+ pax_open_kernel();
18303+ *pgdp = pgd;
18304+ pax_close_kernel();
18305+}
18306+
18307+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18308+{
18309 *pgdp = pgd;
18310 }
18311
18312diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18313index 602b602..acb53ed 100644
18314--- a/arch/x86/include/asm/pgtable_64_types.h
18315+++ b/arch/x86/include/asm/pgtable_64_types.h
18316@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18317 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18318 #define MODULES_END _AC(0xffffffffff000000, UL)
18319 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18320+#define MODULES_EXEC_VADDR MODULES_VADDR
18321+#define MODULES_EXEC_END MODULES_END
18322 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18323 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18324 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18325 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18326
18327+#define ktla_ktva(addr) (addr)
18328+#define ktva_ktla(addr) (addr)
18329+
18330 #define EARLY_DYNAMIC_PAGE_TABLES 64
18331
18332 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18333diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18334index 8c7c108..1c1b77f 100644
18335--- a/arch/x86/include/asm/pgtable_types.h
18336+++ b/arch/x86/include/asm/pgtable_types.h
18337@@ -85,8 +85,10 @@
18338
18339 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18340 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18341-#else
18342+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18343 #define _PAGE_NX (_AT(pteval_t, 0))
18344+#else
18345+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18346 #endif
18347
18348 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
18349@@ -141,6 +143,9 @@ enum page_cache_mode {
18350 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18351 _PAGE_ACCESSED)
18352
18353+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18354+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18355+
18356 #define __PAGE_KERNEL_EXEC \
18357 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18358 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18359@@ -148,7 +153,7 @@ enum page_cache_mode {
18360 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18361 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18362 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18363-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18364+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18365 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18366 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18367 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18368@@ -194,7 +199,7 @@ enum page_cache_mode {
18369 #ifdef CONFIG_X86_64
18370 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18371 #else
18372-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18373+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18374 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18375 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18376 #endif
18377@@ -233,7 +238,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18378 {
18379 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18380 }
18381+#endif
18382
18383+#if PAGETABLE_LEVELS == 3
18384+#include <asm-generic/pgtable-nopud.h>
18385+#endif
18386+
18387+#if PAGETABLE_LEVELS == 2
18388+#include <asm-generic/pgtable-nopmd.h>
18389+#endif
18390+
18391+#ifndef __ASSEMBLY__
18392 #if PAGETABLE_LEVELS > 3
18393 typedef struct { pudval_t pud; } pud_t;
18394
18395@@ -247,8 +262,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18396 return pud.pud;
18397 }
18398 #else
18399-#include <asm-generic/pgtable-nopud.h>
18400-
18401 static inline pudval_t native_pud_val(pud_t pud)
18402 {
18403 return native_pgd_val(pud.pgd);
18404@@ -268,8 +281,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18405 return pmd.pmd;
18406 }
18407 #else
18408-#include <asm-generic/pgtable-nopmd.h>
18409-
18410 static inline pmdval_t native_pmd_val(pmd_t pmd)
18411 {
18412 return native_pgd_val(pmd.pud.pgd);
18413@@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
18414
18415 extern pteval_t __supported_pte_mask;
18416 extern void set_nx(void);
18417-extern int nx_enabled;
18418
18419 #define pgprot_writecombine pgprot_writecombine
18420 extern pgprot_t pgprot_writecombine(pgprot_t prot);
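
The _PAGE_NX hunk above is the soft-NX fallback: when the hardware has no NX bit (32-bit non-PAE) and nothing else claims the software PTE bits, "not executable" is recorded in a software-defined bit that PaX's emulation consults but the MMU ignores. A small stand-alone illustration — the exact bit number for _PAGE_BIT_HIDDEN is an assumption here (some software-available bit below the PFN field):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define _PAGE_BIT_HIDDEN 11	/* assumed: a software PTE bit, below bit 12 */
#define _PAGE_BIT_NX     63	/* hardware NX, PAE/64-bit only */

int main(void)
{
	pteval_t hw_nx   = (pteval_t)1 << _PAGE_BIT_NX;
	pteval_t soft_nx = (pteval_t)1 << _PAGE_BIT_HIDDEN;

	/* Both masks avoid the PFN field (bits 12..51 on x86), so either
	 * can mark a mapping non-executable without corrupting it; only
	 * the hardware bit is enforced by the MMU. */
	printf("hw NX mask:   %#llx\n", (unsigned long long)hw_nx);
	printf("soft NX mask: %#llx\n", (unsigned long long)soft_nx);
	return 0;
}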
18421diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18422index 8f327184..368fb29 100644
18423--- a/arch/x86/include/asm/preempt.h
18424+++ b/arch/x86/include/asm/preempt.h
18425@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18426 */
18427 static __always_inline bool __preempt_count_dec_and_test(void)
18428 {
18429- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18430+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18431 }
18432
18433 /*
18434diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18435index ec1c935..5cc6023 100644
18436--- a/arch/x86/include/asm/processor.h
18437+++ b/arch/x86/include/asm/processor.h
18438@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18439 /* Index into per_cpu list: */
18440 u16 cpu_index;
18441 u32 microcode;
18442-};
18443+} __randomize_layout;
18444
18445 #define X86_VENDOR_INTEL 0
18446 #define X86_VENDOR_CYRIX 1
18447@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18448 : "memory");
18449 }
18450
18451+/* invpcid (%rdx),%rax */
18452+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18453+
18454+#define INVPCID_SINGLE_ADDRESS 0UL
18455+#define INVPCID_SINGLE_CONTEXT 1UL
18456+#define INVPCID_ALL_GLOBAL 2UL
18457+#define INVPCID_ALL_NONGLOBAL 3UL
18458+
18459+#define PCID_KERNEL 0UL
18460+#define PCID_USER 1UL
18461+#define PCID_NOFLUSH (1UL << 63)
18462+
18463 static inline void load_cr3(pgd_t *pgdir)
18464 {
18465- write_cr3(__pa(pgdir));
18466+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18467 }
18468
18469 #ifdef CONFIG_X86_32
18470@@ -282,7 +294,7 @@ struct tss_struct {
18471
18472 } ____cacheline_aligned;
18473
18474-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18475+extern struct tss_struct init_tss[NR_CPUS];
18476
18477 /*
18478 * Save the original ist values for checking stack pointers during debugging
18479@@ -479,6 +491,7 @@ struct thread_struct {
18480 unsigned short ds;
18481 unsigned short fsindex;
18482 unsigned short gsindex;
18483+ unsigned short ss;
18484 #endif
18485 #ifdef CONFIG_X86_32
18486 unsigned long ip;
18487@@ -805,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
18488 */
18489 #define TASK_SIZE PAGE_OFFSET
18490 #define TASK_SIZE_MAX TASK_SIZE
18491+
18492+#ifdef CONFIG_PAX_SEGMEXEC
18493+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18494+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18495+#else
18496 #define STACK_TOP TASK_SIZE
18497-#define STACK_TOP_MAX STACK_TOP
18498+#endif
18499+
18500+#define STACK_TOP_MAX TASK_SIZE
18501
18502 #define INIT_THREAD { \
18503- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18504+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18505 .vm86_info = NULL, \
18506 .sysenter_cs = __KERNEL_CS, \
18507 .io_bitmap_ptr = NULL, \
18508@@ -823,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
18509 */
18510 #define INIT_TSS { \
18511 .x86_tss = { \
18512- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18513+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18514 .ss0 = __KERNEL_DS, \
18515 .ss1 = __KERNEL_CS, \
18516 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18517@@ -834,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
18518 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18519
18520 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18521-#define KSTK_TOP(info) \
18522-({ \
18523- unsigned long *__ptr = (unsigned long *)(info); \
18524- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18525-})
18526+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18527
18528 /*
18529 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18530@@ -853,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18531 #define task_pt_regs(task) \
18532 ({ \
18533 struct pt_regs *__regs__; \
18534- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18535+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18536 __regs__ - 1; \
18537 })
18538
18539@@ -869,13 +885,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18540 * particular problem by preventing anything from being mapped
18541 * at the maximum canonical address.
18542 */
18543-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18544+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18545
18546 /* This decides where the kernel will search for a free chunk of vm
18547 * space during mmap's.
18548 */
18549 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18550- 0xc0000000 : 0xFFFFe000)
18551+ 0xc0000000 : 0xFFFFf000)
18552
18553 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18554 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18555@@ -886,11 +902,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18556 #define STACK_TOP_MAX TASK_SIZE_MAX
18557
18558 #define INIT_THREAD { \
18559- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18560+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18561 }
18562
18563 #define INIT_TSS { \
18564- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18565+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18566 }
18567
18568 /*
18569@@ -918,6 +934,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18570 */
18571 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18572
18573+#ifdef CONFIG_PAX_SEGMEXEC
18574+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18575+#endif
18576+
18577 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18578
18579 /* Get/set a process' ability to use the timestamp counter instruction */
18580@@ -962,7 +982,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18581 return 0;
18582 }
18583
18584-extern unsigned long arch_align_stack(unsigned long sp);
18585+#define arch_align_stack(x) ((x) & ~0xfUL)
18586 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18587
18588 void default_idle(void);
18589@@ -972,6 +992,6 @@ bool xen_set_default_idle(void);
18590 #define xen_set_default_idle 0
18591 #endif
18592
18593-void stop_this_cpu(void *dummy);
18594+void stop_this_cpu(void *dummy) __noreturn;
18595 void df_debug(struct pt_regs *regs, long error_code);
18596 #endif /* _ASM_X86_PROCESSOR_H */
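
The PCID constants and the patched load_cr3() above encode the patch's address-space tagging: kernel translations live under PCID 0, the UDEREF user view under PCID 1, and bit 63 of CR3 asks the CPU not to flush that PCID's TLB entries on the write. A stand-alone sketch of how such a CR3 value is composed — the PGD physical address is 4 KiB aligned, so its low 12 bits are free to carry the PCID:

#include <stdio.h>
#include <stdint.h>

#define PCID_KERNEL  0ULL
#define PCID_USER    1ULL
#define PCID_NOFLUSH (1ULL << 63)

static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
{
	return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
}

int main(void)
{
	uint64_t pgd_pa = 0x1234000ULL;		/* example PGD physical address */

	printf("kernel cr3:         %#llx\n",
	       (unsigned long long)make_cr3(pgd_pa, PCID_KERNEL, 0));
	printf("user cr3, no flush: %#llx\n",
	       (unsigned long long)make_cr3(pgd_pa, PCID_USER, 1));
	return 0;
}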
18597diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18598index 86fc2bb..bd5049a 100644
18599--- a/arch/x86/include/asm/ptrace.h
18600+++ b/arch/x86/include/asm/ptrace.h
18601@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18602 }
18603
18604 /*
18605- * user_mode_vm(regs) determines whether a register set came from user mode.
18606+ * user_mode(regs) determines whether a register set came from user mode.
18607 * This is true if V8086 mode was enabled OR if the register set was from
18608 * protected mode with RPL-3 CS value. This tricky test checks that with
18609 * one comparison. Many places in the kernel can bypass this full check
18610- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18611+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18612+ * be used.
18613 */
18614-static inline int user_mode(struct pt_regs *regs)
18615+static inline int user_mode_novm(struct pt_regs *regs)
18616 {
18617 #ifdef CONFIG_X86_32
18618 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18619 #else
18620- return !!(regs->cs & 3);
18621+ return !!(regs->cs & SEGMENT_RPL_MASK);
18622 #endif
18623 }
18624
18625-static inline int user_mode_vm(struct pt_regs *regs)
18626+static inline int user_mode(struct pt_regs *regs)
18627 {
18628 #ifdef CONFIG_X86_32
18629 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18630 USER_RPL;
18631 #else
18632- return user_mode(regs);
18633+ return user_mode_novm(regs);
18634 #endif
18635 }
18636
18637@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18638 #ifdef CONFIG_X86_64
18639 static inline bool user_64bit_mode(struct pt_regs *regs)
18640 {
18641+ unsigned long cs = regs->cs & 0xffff;
18642 #ifndef CONFIG_PARAVIRT
18643 /*
18644 * On non-paravirt systems, this is the only long mode CPL 3
18645 * selector. We do not allow long mode selectors in the LDT.
18646 */
18647- return regs->cs == __USER_CS;
18648+ return cs == __USER_CS;
18649 #else
18650 /* Headers are too twisted for this to go in paravirt.h. */
18651- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18652+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18653 #endif
18654 }
18655
18656@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18657 * Traps from the kernel do not save sp and ss.
18658 * Use the helper function to retrieve sp.
18659 */
18660- if (offset == offsetof(struct pt_regs, sp) &&
18661- regs->cs == __KERNEL_CS)
18662- return kernel_stack_pointer(regs);
18663+ if (offset == offsetof(struct pt_regs, sp)) {
18664+ unsigned long cs = regs->cs & 0xffff;
18665+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18666+ return kernel_stack_pointer(regs);
18667+ }
18668 #endif
18669 return *(unsigned long *)((unsigned long)regs + offset);
18670 }
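
The user_mode()/user_mode_novm() rename above hinges on one check: the low two bits of the saved CS selector are the requested privilege level, and only user-mode frames carry RPL 3 (the 64-bit branch just switches from a bare 3 to the named mask, and the later hunks mask CS to 16 bits before comparing). A tiny stand-alone version of that test, using the stock i386 selector values:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3	/* low two selector bits = RPL */
#define USER_RPL         0x3

static int came_from_user(unsigned long cs)
{
	return (cs & SEGMENT_RPL_MASK) == USER_RPL;
}

int main(void)
{
	printf("cs=0x73 (i386 __USER_CS)   -> user? %d\n", came_from_user(0x73));
	printf("cs=0x60 (i386 __KERNEL_CS) -> user? %d\n", came_from_user(0x60));
	return 0;
}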
18671diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18672index ae0e241..e80b10b 100644
18673--- a/arch/x86/include/asm/qrwlock.h
18674+++ b/arch/x86/include/asm/qrwlock.h
18675@@ -7,8 +7,8 @@
18676 #define queue_write_unlock queue_write_unlock
18677 static inline void queue_write_unlock(struct qrwlock *lock)
18678 {
18679- barrier();
18680- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18681+ barrier();
18682+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18683 }
18684 #endif
18685
18686diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18687index 9c6b890..5305f53 100644
18688--- a/arch/x86/include/asm/realmode.h
18689+++ b/arch/x86/include/asm/realmode.h
18690@@ -22,16 +22,14 @@ struct real_mode_header {
18691 #endif
18692 /* APM/BIOS reboot */
18693 u32 machine_real_restart_asm;
18694-#ifdef CONFIG_X86_64
18695 u32 machine_real_restart_seg;
18696-#endif
18697 };
18698
18699 /* This must match data at trampoline_32/64.S */
18700 struct trampoline_header {
18701 #ifdef CONFIG_X86_32
18702 u32 start;
18703- u16 gdt_pad;
18704+ u16 boot_cs;
18705 u16 gdt_limit;
18706 u32 gdt_base;
18707 #else
18708diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18709index a82c4f1..ac45053 100644
18710--- a/arch/x86/include/asm/reboot.h
18711+++ b/arch/x86/include/asm/reboot.h
18712@@ -6,13 +6,13 @@
18713 struct pt_regs;
18714
18715 struct machine_ops {
18716- void (*restart)(char *cmd);
18717- void (*halt)(void);
18718- void (*power_off)(void);
18719+ void (* __noreturn restart)(char *cmd);
18720+ void (* __noreturn halt)(void);
18721+ void (* __noreturn power_off)(void);
18722 void (*shutdown)(void);
18723 void (*crash_shutdown)(struct pt_regs *);
18724- void (*emergency_restart)(void);
18725-};
18726+ void (* __noreturn emergency_restart)(void);
18727+} __no_const;
18728
18729 extern struct machine_ops machine_ops;
18730
18731diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18732index 8f7866a..e442f20 100644
18733--- a/arch/x86/include/asm/rmwcc.h
18734+++ b/arch/x86/include/asm/rmwcc.h
18735@@ -3,7 +3,34 @@
18736
18737 #ifdef CC_HAVE_ASM_GOTO
18738
18739-#define __GEN_RMWcc(fullop, var, cc, ...) \
18740+#ifdef CONFIG_PAX_REFCOUNT
18741+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18742+do { \
18743+ asm_volatile_goto (fullop \
18744+ ";jno 0f\n" \
18745+ fullantiop \
18746+ ";int $4\n0:\n" \
18747+ _ASM_EXTABLE(0b, 0b) \
18748+ ";j" cc " %l[cc_label]" \
18749+ : : "m" (var), ## __VA_ARGS__ \
18750+ : "memory" : cc_label); \
18751+ return 0; \
18752+cc_label: \
18753+ return 1; \
18754+} while (0)
18755+#else
18756+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18757+do { \
18758+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18759+ : : "m" (var), ## __VA_ARGS__ \
18760+ : "memory" : cc_label); \
18761+ return 0; \
18762+cc_label: \
18763+ return 1; \
18764+} while (0)
18765+#endif
18766+
18767+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18768 do { \
18769 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18770 : : "m" (var), ## __VA_ARGS__ \
18771@@ -13,15 +40,46 @@ cc_label: \
18772 return 1; \
18773 } while (0)
18774
18775-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18776- __GEN_RMWcc(op " " arg0, var, cc)
18777+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18778+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18779
18780-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18781- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18782+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18783+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18784+
18785+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18786+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18787+
18788+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18789+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18790
18791 #else /* !CC_HAVE_ASM_GOTO */
18792
18793-#define __GEN_RMWcc(fullop, var, cc, ...) \
18794+#ifdef CONFIG_PAX_REFCOUNT
18795+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18796+do { \
18797+ char c; \
18798+ asm volatile (fullop \
18799+ ";jno 0f\n" \
18800+ fullantiop \
18801+ ";int $4\n0:\n" \
18802+ _ASM_EXTABLE(0b, 0b) \
18803+ "; set" cc " %1" \
18804+ : "+m" (var), "=qm" (c) \
18805+ : __VA_ARGS__ : "memory"); \
18806+ return c != 0; \
18807+} while (0)
18808+#else
18809+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18810+do { \
18811+ char c; \
18812+ asm volatile (fullop "; set" cc " %1" \
18813+ : "+m" (var), "=qm" (c) \
18814+ : __VA_ARGS__ : "memory"); \
18815+ return c != 0; \
18816+} while (0)
18817+#endif
18818+
18819+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18820 do { \
18821 char c; \
18822 asm volatile (fullop "; set" cc " %1" \
18823@@ -30,11 +88,17 @@ do { \
18824 return c != 0; \
18825 } while (0)
18826
18827-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18828- __GEN_RMWcc(op " " arg0, var, cc)
18829+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18830+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18831+
18832+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18833+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18834+
18835+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18836+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18837
18838-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18839- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18840+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18841+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18842
18843 #endif /* CC_HAVE_ASM_GOTO */
18844
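
Every PAX_REFCOUNT variant above follows one shape: perform the operation, jno past the fixup while the overflow flag is clear, otherwise apply the anti-op to undo the damage and raise int $4 (the overflow exception), which the _ASM_EXTABLE entry turns into a handled event. A stand-alone, single-threaded C analogue of the checked decl/incl pair used by __preempt_count_dec_and_test() — it reports instead of trapping, and it deliberately lacks the lock-free atomicity of the real asm:

#include <stdio.h>
#include <limits.h>

/* Userspace analogue of the PAX_REFCOUNT pattern (GCC/Clang builtin). */
static int checked_dec_and_test(int *v)
{
	int res;

	if (__builtin_sub_overflow(*v, 1, &res)) {
		/* op + anti-op: the counter is left at its old value */
		fprintf(stderr, "refcount overflow detected\n");
		return 0;
	}
	*v = res;
	return res == 0;	/* the "e" condition of GEN_UNARY_RMWcc */
}

int main(void)
{
	int v = 1;
	printf("dec_and_test: %d (v=%d)\n", checked_dec_and_test(&v), v);
	v = INT_MIN;		/* decrement would wrap */
	printf("dec_and_test: %d (v=%d)\n", checked_dec_and_test(&v), v);
	return 0;
}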
18845diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18846index cad82c9..2e5c5c1 100644
18847--- a/arch/x86/include/asm/rwsem.h
18848+++ b/arch/x86/include/asm/rwsem.h
18849@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18850 {
18851 asm volatile("# beginning down_read\n\t"
18852 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18853+
18854+#ifdef CONFIG_PAX_REFCOUNT
18855+ "jno 0f\n"
18856+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18857+ "int $4\n0:\n"
18858+ _ASM_EXTABLE(0b, 0b)
18859+#endif
18860+
18861 /* adds 0x00000001 */
18862 " jns 1f\n"
18863 " call call_rwsem_down_read_failed\n"
18864@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18865 "1:\n\t"
18866 " mov %1,%2\n\t"
18867 " add %3,%2\n\t"
18868+
18869+#ifdef CONFIG_PAX_REFCOUNT
18870+ "jno 0f\n"
18871+ "sub %3,%2\n"
18872+ "int $4\n0:\n"
18873+ _ASM_EXTABLE(0b, 0b)
18874+#endif
18875+
18876 " jle 2f\n\t"
18877 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18878 " jnz 1b\n\t"
18879@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18880 long tmp;
18881 asm volatile("# beginning down_write\n\t"
18882 LOCK_PREFIX " xadd %1,(%2)\n\t"
18883+
18884+#ifdef CONFIG_PAX_REFCOUNT
18885+ "jno 0f\n"
18886+ "mov %1,(%2)\n"
18887+ "int $4\n0:\n"
18888+ _ASM_EXTABLE(0b, 0b)
18889+#endif
18890+
18891 /* adds 0xffff0001, returns the old value */
18892 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18893 /* was the active mask 0 before? */
18894@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18895 long tmp;
18896 asm volatile("# beginning __up_read\n\t"
18897 LOCK_PREFIX " xadd %1,(%2)\n\t"
18898+
18899+#ifdef CONFIG_PAX_REFCOUNT
18900+ "jno 0f\n"
18901+ "mov %1,(%2)\n"
18902+ "int $4\n0:\n"
18903+ _ASM_EXTABLE(0b, 0b)
18904+#endif
18905+
18906 /* subtracts 1, returns the old value */
18907 " jns 1f\n\t"
18908 " call call_rwsem_wake\n" /* expects old value in %edx */
18909@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18910 long tmp;
18911 asm volatile("# beginning __up_write\n\t"
18912 LOCK_PREFIX " xadd %1,(%2)\n\t"
18913+
18914+#ifdef CONFIG_PAX_REFCOUNT
18915+ "jno 0f\n"
18916+ "mov %1,(%2)\n"
18917+ "int $4\n0:\n"
18918+ _ASM_EXTABLE(0b, 0b)
18919+#endif
18920+
18921 /* subtracts 0xffff0001, returns the old value */
18922 " jns 1f\n\t"
18923 " call call_rwsem_wake\n" /* expects old value in %edx */
18924@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18925 {
18926 asm volatile("# beginning __downgrade_write\n\t"
18927 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18928+
18929+#ifdef CONFIG_PAX_REFCOUNT
18930+ "jno 0f\n"
18931+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18932+ "int $4\n0:\n"
18933+ _ASM_EXTABLE(0b, 0b)
18934+#endif
18935+
18936 /*
18937 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18938 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18939@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18940 */
18941 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18942 {
18943- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18944+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18945+
18946+#ifdef CONFIG_PAX_REFCOUNT
18947+ "jno 0f\n"
18948+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18949+ "int $4\n0:\n"
18950+ _ASM_EXTABLE(0b, 0b)
18951+#endif
18952+
18953 : "+m" (sem->count)
18954 : "er" (delta));
18955 }
18956@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18957 */
18958 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18959 {
18960- return delta + xadd(&sem->count, delta);
18961+ return delta + xadd_check_overflow(&sem->count, delta);
18962 }
18963
18964 #endif /* __KERNEL__ */
18965diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18966index db257a5..b91bc77 100644
18967--- a/arch/x86/include/asm/segment.h
18968+++ b/arch/x86/include/asm/segment.h
18969@@ -73,10 +73,15 @@
18970 * 26 - ESPFIX small SS
18971 * 27 - per-cpu [ offset to per-cpu data area ]
18972 * 28 - stack_canary-20 [ for stack protector ]
18973- * 29 - unused
18974- * 30 - unused
18975+ * 29 - PCI BIOS CS
18976+ * 30 - PCI BIOS DS
18977 * 31 - TSS for double fault handler
18978 */
18979+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18980+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18981+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18982+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18983+
18984 #define GDT_ENTRY_TLS_MIN 6
18985 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18986
18987@@ -88,6 +93,8 @@
18988
18989 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18990
18991+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18992+
18993 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18994
18995 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18996@@ -113,6 +120,12 @@
18997 #define __KERNEL_STACK_CANARY 0
18998 #endif
18999
19000+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19001+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19002+
19003+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19004+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19005+
19006 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19007
19008 /*
19009@@ -140,7 +153,7 @@
19010 */
19011
19012 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19013-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19014+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19015
19016
19017 #else
19018@@ -164,6 +177,8 @@
19019 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19020 #define __USER32_DS __USER_DS
19021
19022+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19023+
19024 #define GDT_ENTRY_TSS 8 /* needs two entries */
19025 #define GDT_ENTRY_LDT 10 /* needs two entries */
19026 #define GDT_ENTRY_TLS_MIN 12
19027@@ -172,6 +187,8 @@
19028 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19029 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19030
19031+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19032+
19033 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19034 #define FS_TLS 0
19035 #define GS_TLS 1
19036@@ -179,12 +196,14 @@
19037 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19038 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19039
19040-#define GDT_ENTRIES 16
19041+#define GDT_ENTRIES 17
19042
19043 #endif
19044
19045 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19046+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19047 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19048+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19049 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19050 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19051 #ifndef CONFIG_PARAVIRT
19052@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19053 {
19054 unsigned long __limit;
19055 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19056- return __limit + 1;
19057+ return __limit;
19058 }
19059
19060 #endif /* !__ASSEMBLY__ */
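
The new GDT entries above (KERNEXEC kernel CS, UDEREF kernel DS, PCI BIOS CS/DS) all derive their selectors by the same rule the file already uses: selector = (GDT index << 3) | table bit | RPL. A stand-alone check of that arithmetic against the 64-bit values from this hunk:

#include <stdio.h>

static unsigned int selector(unsigned int index, unsigned int rpl)
{
	return (index << 3) | rpl;	/* table bit 0 = GDT */
}

int main(void)
{
	/* 64-bit layout from the hunk: KERNEXEC CS at index 7,
	 * UDEREF kernel DS at index 16. */
	printf("__KERNEXEC_KERNEL_CS       = %#x\n", selector(7, 0));
	printf("__UDEREF_KERNEL_DS         = %#x\n", selector(16, 0));
	printf("__USER_CS (index 6, RPL 3) = %#x\n", selector(6, 3));
	return 0;
}

This is also why GDT_ENTRIES grows from 16 to 17: index 16 must now exist.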
19061diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19062index 8d3120f..352b440 100644
19063--- a/arch/x86/include/asm/smap.h
19064+++ b/arch/x86/include/asm/smap.h
19065@@ -25,11 +25,40 @@
19066
19067 #include <asm/alternative-asm.h>
19068
19069+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19070+#define ASM_PAX_OPEN_USERLAND \
19071+ 661: jmp 663f; \
19072+ .pushsection .altinstr_replacement, "a" ; \
19073+ 662: pushq %rax; nop; \
19074+ .popsection ; \
19075+ .pushsection .altinstructions, "a" ; \
19076+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19077+ .popsection ; \
19078+ call __pax_open_userland; \
19079+ popq %rax; \
19080+ 663:
19081+
19082+#define ASM_PAX_CLOSE_USERLAND \
19083+ 661: jmp 663f; \
19084+ .pushsection .altinstr_replacement, "a" ; \
19085+ 662: pushq %rax; nop; \
19086+ .popsection; \
19087+ .pushsection .altinstructions, "a" ; \
19088+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19089+ .popsection; \
19090+ call __pax_close_userland; \
19091+ popq %rax; \
19092+ 663:
19093+#else
19094+#define ASM_PAX_OPEN_USERLAND
19095+#define ASM_PAX_CLOSE_USERLAND
19096+#endif
19097+
19098 #ifdef CONFIG_X86_SMAP
19099
19100 #define ASM_CLAC \
19101 661: ASM_NOP3 ; \
19102- .pushsection .altinstr_replacement, "ax" ; \
19103+ .pushsection .altinstr_replacement, "a" ; \
19104 662: __ASM_CLAC ; \
19105 .popsection ; \
19106 .pushsection .altinstructions, "a" ; \
19107@@ -38,7 +67,7 @@
19108
19109 #define ASM_STAC \
19110 661: ASM_NOP3 ; \
19111- .pushsection .altinstr_replacement, "ax" ; \
19112+ .pushsection .altinstr_replacement, "a" ; \
19113 662: __ASM_STAC ; \
19114 .popsection ; \
19115 .pushsection .altinstructions, "a" ; \
19116@@ -56,6 +85,37 @@
19117
19118 #include <asm/alternative.h>
19119
19120+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19121+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19122+
19123+extern void __pax_open_userland(void);
19124+static __always_inline unsigned long pax_open_userland(void)
19125+{
19126+
19127+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19128+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19129+ :
19130+ : [open] "i" (__pax_open_userland)
19131+ : "memory", "rax");
19132+#endif
19133+
19134+ return 0;
19135+}
19136+
19137+extern void __pax_close_userland(void);
19138+static __always_inline unsigned long pax_close_userland(void)
19139+{
19140+
19141+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19142+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19143+ :
19144+ : [close] "i" (__pax_close_userland)
19145+ : "memory", "rax");
19146+#endif
19147+
19148+ return 0;
19149+}
19150+
19151 #ifdef CONFIG_X86_SMAP
19152
19153 static __always_inline void clac(void)
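
Both the ASM_PAX_OPEN/CLOSE_USERLAND macros and the C pax_open_userland() above lean on the alternatives mechanism: at boot, on CPUs reporting the PaX-defined STRONGUDEREF feature, a call instruction is patched over what otherwise stays a NOP, so hardware without the feature pays nothing per access. A userspace analogue of that one-time dispatch, using a function pointer where the kernel rewrites the instruction stream (all names here are illustrative):

#include <stdio.h>

static void open_userland_strong(void)
{
	puts("switch CR3 to the kernel+user view");	/* stand-in action */
}

static void open_userland_nop(void)
{
	/* feature absent: the patched site stays a NOP */
}

static void (*pax_open_userland_fn)(void) = open_userland_nop;

static void apply_alternatives_once(int has_stronguderef)
{
	if (has_stronguderef)
		pax_open_userland_fn = open_userland_strong;
}

int main(void)
{
	apply_alternatives_once(1);	/* pretend CPUID reported the feature */
	pax_open_userland_fn();		/* the "patched" call site */
	return 0;
}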
19154diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19155index 8cd1cc3..827e09e 100644
19156--- a/arch/x86/include/asm/smp.h
19157+++ b/arch/x86/include/asm/smp.h
19158@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19159 /* cpus sharing the last level cache: */
19160 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19161 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19162-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19163+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19164
19165 static inline struct cpumask *cpu_sibling_mask(int cpu)
19166 {
19167@@ -78,7 +78,7 @@ struct smp_ops {
19168
19169 void (*send_call_func_ipi)(const struct cpumask *mask);
19170 void (*send_call_func_single_ipi)(int cpu);
19171-};
19172+} __no_const;
19173
19174 /* Globals due to paravirt */
19175 extern void set_cpu_sibling_map(int cpu);
19176@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19177 extern int safe_smp_processor_id(void);
19178
19179 #elif defined(CONFIG_X86_64_SMP)
19180-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19181-
19182-#define stack_smp_processor_id() \
19183-({ \
19184- struct thread_info *ti; \
19185- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19186- ti->cpu; \
19187-})
19188+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19189+#define stack_smp_processor_id() raw_smp_processor_id()
19190 #define safe_smp_processor_id() smp_processor_id()
19191
19192 #endif
19193diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19194index 6a99859..03cb807 100644
19195--- a/arch/x86/include/asm/stackprotector.h
19196+++ b/arch/x86/include/asm/stackprotector.h
19197@@ -47,7 +47,7 @@
19198 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19199 */
19200 #define GDT_STACK_CANARY_INIT \
19201- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19202+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19203
19204 /*
19205 * Initialize the stackprotector canary value.
19206@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19207
19208 static inline void load_stack_canary_segment(void)
19209 {
19210-#ifdef CONFIG_X86_32
19211+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19212 asm volatile ("mov %0, %%gs" : : "r" (0));
19213 #endif
19214 }
19215diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19216index 70bbe39..4ae2bd4 100644
19217--- a/arch/x86/include/asm/stacktrace.h
19218+++ b/arch/x86/include/asm/stacktrace.h
19219@@ -11,28 +11,20 @@
19220
19221 extern int kstack_depth_to_print;
19222
19223-struct thread_info;
19224+struct task_struct;
19225 struct stacktrace_ops;
19226
19227-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19228- unsigned long *stack,
19229- unsigned long bp,
19230- const struct stacktrace_ops *ops,
19231- void *data,
19232- unsigned long *end,
19233- int *graph);
19234+typedef unsigned long walk_stack_t(struct task_struct *task,
19235+ void *stack_start,
19236+ unsigned long *stack,
19237+ unsigned long bp,
19238+ const struct stacktrace_ops *ops,
19239+ void *data,
19240+ unsigned long *end,
19241+ int *graph);
19242
19243-extern unsigned long
19244-print_context_stack(struct thread_info *tinfo,
19245- unsigned long *stack, unsigned long bp,
19246- const struct stacktrace_ops *ops, void *data,
19247- unsigned long *end, int *graph);
19248-
19249-extern unsigned long
19250-print_context_stack_bp(struct thread_info *tinfo,
19251- unsigned long *stack, unsigned long bp,
19252- const struct stacktrace_ops *ops, void *data,
19253- unsigned long *end, int *graph);
19254+extern walk_stack_t print_context_stack;
19255+extern walk_stack_t print_context_stack_bp;
19256
19257 /* Generic stack tracer with callbacks */
19258
19259@@ -40,7 +32,7 @@ struct stacktrace_ops {
19260 void (*address)(void *data, unsigned long address, int reliable);
19261 /* On negative return stop dumping */
19262 int (*stack)(void *data, char *name);
19263- walk_stack_t walk_stack;
19264+ walk_stack_t *walk_stack;
19265 };
19266
19267 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
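
The walk_stack_t hunk above changes the typedef from pointer-to-function to plain function type. The payoff is visible in the same hunk: print_context_stack and print_context_stack_bp can then be declared directly from the typedef, and the struct member becomes walk_stack_t *, so prototype drift between the declarations is impossible. A stand-alone toy showing the same C idiom:

#include <stdio.h>

typedef int walker_t(int depth);	/* function type, not a pointer */

extern walker_t walk_a;			/* declares a function */

int walk_a(int depth)			/* definition still spelled out */
{
	return depth + 1;
}

struct ops {
	walker_t *walk;			/* pointer formed by adding '*' */
};

int main(void)
{
	struct ops o = { .walk = walk_a };
	printf("%d\n", o.walk(41));
	return 0;
}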
19268diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19269index 751bf4b..a1278b5 100644
19270--- a/arch/x86/include/asm/switch_to.h
19271+++ b/arch/x86/include/asm/switch_to.h
19272@@ -112,7 +112,7 @@ do { \
19273 "call __switch_to\n\t" \
19274 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19275 __switch_canary \
19276- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19277+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19278 "movq %%rax,%%rdi\n\t" \
19279 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19280 "jnz ret_from_fork\n\t" \
19281@@ -123,7 +123,7 @@ do { \
19282 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19283 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19284 [_tif_fork] "i" (_TIF_FORK), \
19285- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19286+ [thread_info] "m" (current_tinfo), \
19287 [current_task] "m" (current_task) \
19288 __switch_canary_iparam \
19289 : "memory", "cc" __EXTRA_CLOBBER)
19290diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19291index 1d4e4f2..506db18 100644
19292--- a/arch/x86/include/asm/thread_info.h
19293+++ b/arch/x86/include/asm/thread_info.h
19294@@ -24,7 +24,6 @@ struct exec_domain;
19295 #include <linux/atomic.h>
19296
19297 struct thread_info {
19298- struct task_struct *task; /* main task structure */
19299 struct exec_domain *exec_domain; /* execution domain */
19300 __u32 flags; /* low level flags */
19301 __u32 status; /* thread synchronous flags */
19302@@ -32,13 +31,13 @@ struct thread_info {
19303 int saved_preempt_count;
19304 mm_segment_t addr_limit;
19305 void __user *sysenter_return;
19306+ unsigned long lowest_stack;
19307 unsigned int sig_on_uaccess_error:1;
19308 unsigned int uaccess_err:1; /* uaccess failed */
19309 };
19310
19311-#define INIT_THREAD_INFO(tsk) \
19312+#define INIT_THREAD_INFO \
19313 { \
19314- .task = &tsk, \
19315 .exec_domain = &default_exec_domain, \
19316 .flags = 0, \
19317 .cpu = 0, \
19318@@ -46,7 +45,7 @@ struct thread_info {
19319 .addr_limit = KERNEL_DS, \
19320 }
19321
19322-#define init_thread_info (init_thread_union.thread_info)
19323+#define init_thread_info (init_thread_union.stack)
19324 #define init_stack (init_thread_union.stack)
19325
19326 #else /* !__ASSEMBLY__ */
19327@@ -86,6 +85,7 @@ struct thread_info {
19328 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19329 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19330 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19331+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19332
19333 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19334 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19335@@ -109,17 +109,18 @@ struct thread_info {
19336 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19337 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19338 #define _TIF_X32 (1 << TIF_X32)
19339+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19340
19341 /* work to do in syscall_trace_enter() */
19342 #define _TIF_WORK_SYSCALL_ENTRY \
19343 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19344 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19345- _TIF_NOHZ)
19346+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19347
19348 /* work to do in syscall_trace_leave() */
19349 #define _TIF_WORK_SYSCALL_EXIT \
19350 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19351- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19352+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19353
19354 /* work to do on interrupt/exception return */
19355 #define _TIF_WORK_MASK \
19356@@ -130,7 +131,7 @@ struct thread_info {
19357 /* work to do on any return to user space */
19358 #define _TIF_ALLWORK_MASK \
19359 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19360- _TIF_NOHZ)
19361+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19362
19363 /* Only used for 64 bit */
19364 #define _TIF_DO_NOTIFY_MASK \
19365@@ -145,7 +146,6 @@ struct thread_info {
19366 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19367
19368 #define STACK_WARN (THREAD_SIZE/8)
19369-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19370
19371 /*
19372 * macros/functions for gaining access to the thread information structure
19373@@ -156,12 +156,11 @@ struct thread_info {
19374
19375 DECLARE_PER_CPU(unsigned long, kernel_stack);
19376
19377+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19378+
19379 static inline struct thread_info *current_thread_info(void)
19380 {
19381- struct thread_info *ti;
19382- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19383- KERNEL_STACK_OFFSET - THREAD_SIZE);
19384- return ti;
19385+ return this_cpu_read_stable(current_tinfo);
19386 }
19387
19388 static inline unsigned long current_stack_pointer(void)
19389@@ -179,14 +178,7 @@ static inline unsigned long current_stack_pointer(void)
19390
19391 /* how to get the thread information struct from ASM */
19392 #define GET_THREAD_INFO(reg) \
19393- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19394- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19395-
19396-/*
19397- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19398- * a certain register (to be used in assembler memory operands).
19399- */
19400-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19401+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19402
19403 #endif
19404
19405@@ -242,5 +234,12 @@ static inline bool is_ia32_task(void)
19406 extern void arch_task_cache_init(void);
19407 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19408 extern void arch_release_task_struct(struct task_struct *tsk);
19409+
19410+#define __HAVE_THREAD_FUNCTIONS
19411+#define task_thread_info(task) (&(task)->tinfo)
19412+#define task_stack_page(task) ((task)->stack)
19413+#define setup_thread_stack(p, org) do {} while (0)
19414+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19415+
19416 #endif
19417 #endif /* _ASM_X86_THREAD_INFO_H */
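
The current_thread_info() rewrite above replaces the old stack-pointer arithmetic (per-cpu kernel_stack minus a fixed offset) with a direct per-CPU pointer, current_tinfo, that the context-switch path keeps up to date — one load instead of address math against the stack, which matters once thread_info moves out of the stack page. A userspace analogue of the pattern, with thread-local storage standing in for a per-CPU variable:

#include <stdio.h>

struct thread_info { unsigned int flags; };

static __thread struct thread_info *current_tinfo;	/* per-"CPU" slot */

static struct thread_info *current_thread_info(void)
{
	return current_tinfo;	/* one load, no stack-mask arithmetic */
}

int main(void)
{
	struct thread_info ti = { .flags = 0x2 };

	current_tinfo = &ti;	/* what the switch path would do */
	printf("flags=%#x\n", current_thread_info()->flags);
	return 0;
}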
19418diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19419index cd79194..e7a9491 100644
19420--- a/arch/x86/include/asm/tlbflush.h
19421+++ b/arch/x86/include/asm/tlbflush.h
19422@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
19423
19424 static inline void __native_flush_tlb(void)
19425 {
19426+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19427+ u64 descriptor[2];
19428+
19429+ descriptor[0] = PCID_KERNEL;
19430+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19431+ return;
19432+ }
19433+
19434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19435+ if (static_cpu_has(X86_FEATURE_PCID)) {
19436+ unsigned int cpu = raw_get_cpu();
19437+
19438+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19439+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19440+ raw_put_cpu_no_resched();
19441+ return;
19442+ }
19443+#endif
19444+
19445 native_write_cr3(native_read_cr3());
19446 }
19447
19448 static inline void __native_flush_tlb_global_irq_disabled(void)
19449 {
19450- unsigned long cr4;
19451+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19452+ u64 descriptor[2];
19453
19454- cr4 = this_cpu_read(cpu_tlbstate.cr4);
19455- /* clear PGE */
19456- native_write_cr4(cr4 & ~X86_CR4_PGE);
19457- /* write old PGE again and flush TLBs */
19458- native_write_cr4(cr4);
19459+ descriptor[0] = PCID_KERNEL;
19460+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19461+ } else {
19462+ unsigned long cr4;
19463+
19464+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
19465+ /* clear PGE */
19466+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19467+ /* write old PGE again and flush TLBs */
19468+ native_write_cr4(cr4);
19469+ }
19470 }
19471
19472 static inline void __native_flush_tlb_global(void)
19473@@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)
19474
19475 static inline void __native_flush_tlb_single(unsigned long addr)
19476 {
19477+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19478+ u64 descriptor[2];
19479+
19480+ descriptor[0] = PCID_KERNEL;
19481+ descriptor[1] = addr;
19482+
19483+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19484+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19485+ if (addr < TASK_SIZE_MAX)
19486+ descriptor[1] += pax_user_shadow_base;
19487+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19488+ }
19489+
19490+ descriptor[0] = PCID_USER;
19491+ descriptor[1] = addr;
19492+#endif
19493+
19494+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19495+ return;
19496+ }
19497+
19498+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19499+ if (static_cpu_has(X86_FEATURE_PCID)) {
19500+ unsigned int cpu = raw_get_cpu();
19501+
19502+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19503+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19504+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19505+ raw_put_cpu_no_resched();
19506+
19507+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19508+ addr += pax_user_shadow_base;
19509+ }
19510+#endif
19511+
19512 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19513 }
19514
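
The INVPCID paths added above all build the same 16-byte memory operand: first quadword the PCID, second quadword the linear address (consulted only for single-address invalidations), with the invalidation type passed in a register — the patch issues invpcid (%rdx),%rax with %rdx pointing at the descriptor and %rax holding the type. A data-only sketch of that descriptor layout:

#include <stdio.h>
#include <stdint.h>

struct invpcid_desc {
	uint64_t pcid;	/* descriptor[0] in the hunks above */
	uint64_t addr;	/* descriptor[1]: used by single-address type only */
};

#define INVPCID_SINGLE_ADDRESS 0ULL
#define PCID_KERNEL            0ULL

int main(void)
{
	struct invpcid_desc d = {
		.pcid = PCID_KERNEL,
		.addr = 0xffffffff81000000ULL,	/* example kernel address */
	};

	printf("type=%llu pcid=%llu addr=%#llx\n",
	       (unsigned long long)INVPCID_SINGLE_ADDRESS,
	       (unsigned long long)d.pcid,
	       (unsigned long long)d.addr);
	return 0;
}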
19515diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19516index ace9dec..3f9e253 100644
19517--- a/arch/x86/include/asm/uaccess.h
19518+++ b/arch/x86/include/asm/uaccess.h
19519@@ -7,6 +7,7 @@
19520 #include <linux/compiler.h>
19521 #include <linux/thread_info.h>
19522 #include <linux/string.h>
19523+#include <linux/spinlock.h>
19524 #include <asm/asm.h>
19525 #include <asm/page.h>
19526 #include <asm/smap.h>
19527@@ -29,7 +30,12 @@
19528
19529 #define get_ds() (KERNEL_DS)
19530 #define get_fs() (current_thread_info()->addr_limit)
19531+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19532+void __set_fs(mm_segment_t x);
19533+void set_fs(mm_segment_t x);
19534+#else
19535 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19536+#endif
19537
19538 #define segment_eq(a, b) ((a).seg == (b).seg)
19539
19540@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19541 * checks that the pointer is in the user space range - after calling
19542 * this function, memory access functions may still return -EFAULT.
19543 */
19544-#define access_ok(type, addr, size) \
19545- likely(!__range_not_ok(addr, size, user_addr_max()))
19546+extern int _cond_resched(void);
19547+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19548+#define access_ok(type, addr, size) \
19549+({ \
19550+ unsigned long __size = size; \
19551+ unsigned long __addr = (unsigned long)addr; \
19552+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19553+ if (__ret_ao && __size) { \
19554+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19555+ unsigned long __end_ao = __addr + __size - 1; \
19556+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19557+ while (__addr_ao <= __end_ao) { \
19558+ char __c_ao; \
19559+ __addr_ao += PAGE_SIZE; \
19560+ if (__size > PAGE_SIZE) \
19561+ _cond_resched(); \
19562+ if (__get_user(__c_ao, (char __user *)__addr)) \
19563+ break; \
19564+ if (type != VERIFY_WRITE) { \
19565+ __addr = __addr_ao; \
19566+ continue; \
19567+ } \
19568+ if (__put_user(__c_ao, (char __user *)__addr)) \
19569+ break; \
19570+ __addr = __addr_ao; \
19571+ } \
19572+ } \
19573+ } \
19574+ __ret_ao; \
19575+})
19576
19577 /*
19578 * The exception table consists of pairs of addresses relative to the
19579@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19580 extern int __get_user_bad(void);
19581
19582 /*
19583- * This is a type: either unsigned long, if the argument fits into
19584- * that type, or otherwise unsigned long long.
19585+ * This is a type: either (un)signed int, if the argument fits into
19586+ * that type, or otherwise (un)signed long long.
19587 */
19588 #define __inttype(x) \
19589-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19590+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19591+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19592+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19593
19594 /**
19595 * get_user: - Get a simple variable from user space.
19596@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19597 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19598 __chk_user_ptr(ptr); \
19599 might_fault(); \
19600+ pax_open_userland(); \
19601 asm volatile("call __get_user_%P3" \
19602 : "=a" (__ret_gu), "=r" (__val_gu) \
19603 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19604 (x) = (__force __typeof__(*(ptr))) __val_gu; \
19605+ pax_close_userland(); \
19606 __ret_gu; \
19607 })
19608
19609@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19610 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19611 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19612
19613-
19614+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19615+#define __copyuser_seg "gs;"
19616+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19617+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19618+#else
19619+#define __copyuser_seg
19620+#define __COPYUSER_SET_ES
19621+#define __COPYUSER_RESTORE_ES
19622+#endif
19623
19624 #ifdef CONFIG_X86_32
19625 #define __put_user_asm_u64(x, addr, err, errret) \
19626 asm volatile(ASM_STAC "\n" \
19627- "1: movl %%eax,0(%2)\n" \
19628- "2: movl %%edx,4(%2)\n" \
19629+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19630+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19631 "3: " ASM_CLAC "\n" \
19632 ".section .fixup,\"ax\"\n" \
19633 "4: movl %3,%0\n" \
19634@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19635
19636 #define __put_user_asm_ex_u64(x, addr) \
19637 asm volatile(ASM_STAC "\n" \
19638- "1: movl %%eax,0(%1)\n" \
19639- "2: movl %%edx,4(%1)\n" \
19640+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19641+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19642 "3: " ASM_CLAC "\n" \
19643 _ASM_EXTABLE_EX(1b, 2b) \
19644 _ASM_EXTABLE_EX(2b, 3b) \
19645@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19646 __typeof__(*(ptr)) __pu_val; \
19647 __chk_user_ptr(ptr); \
19648 might_fault(); \
19649- __pu_val = x; \
19650+ __pu_val = (x); \
19651+ pax_open_userland(); \
19652 switch (sizeof(*(ptr))) { \
19653 case 1: \
19654 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19655@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19656 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19657 break; \
19658 } \
19659+ pax_close_userland(); \
19660 __ret_pu; \
19661 })
19662
19663@@ -355,8 +403,10 @@ do { \
19664 } while (0)
19665
19666 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19667+do { \
19668+ pax_open_userland(); \
19669 asm volatile(ASM_STAC "\n" \
19670- "1: mov"itype" %2,%"rtype"1\n" \
19671+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19672 "2: " ASM_CLAC "\n" \
19673 ".section .fixup,\"ax\"\n" \
19674 "3: mov %3,%0\n" \
19675@@ -364,8 +414,10 @@ do { \
19676 " jmp 2b\n" \
19677 ".previous\n" \
19678 _ASM_EXTABLE(1b, 3b) \
19679- : "=r" (err), ltype(x) \
19680- : "m" (__m(addr)), "i" (errret), "0" (err))
19681+ : "=r" (err), ltype (x) \
19682+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19683+ pax_close_userland(); \
19684+} while (0)
19685
19686 #define __get_user_size_ex(x, ptr, size) \
19687 do { \
19688@@ -389,7 +441,7 @@ do { \
19689 } while (0)
19690
19691 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19692- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19693+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19694 "2:\n" \
19695 _ASM_EXTABLE_EX(1b, 2b) \
19696 : ltype(x) : "m" (__m(addr)))
19697@@ -406,13 +458,24 @@ do { \
19698 int __gu_err; \
19699 unsigned long __gu_val; \
19700 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19701- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19702+ (x) = (__typeof__(*(ptr)))__gu_val; \
19703 __gu_err; \
19704 })
19705
19706 /* FIXME: this hack is definitely wrong -AK */
19707 struct __large_struct { unsigned long buf[100]; };
19708-#define __m(x) (*(struct __large_struct __user *)(x))
19709+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19710+#define ____m(x) \
19711+({ \
19712+ unsigned long ____x = (unsigned long)(x); \
19713+ if (____x < pax_user_shadow_base) \
19714+ ____x += pax_user_shadow_base; \
19715+ (typeof(x))____x; \
19716+})
19717+#else
19718+#define ____m(x) (x)
19719+#endif
19720+#define __m(x) (*(struct __large_struct __user *)____m(x))
19721
19722 /*
19723 * Tell gcc we read from memory instead of writing: this is because
19724@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19725 * aliasing issues.
19726 */
19727 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19728+do { \
19729+ pax_open_userland(); \
19730 asm volatile(ASM_STAC "\n" \
19731- "1: mov"itype" %"rtype"1,%2\n" \
19732+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19733 "2: " ASM_CLAC "\n" \
19734 ".section .fixup,\"ax\"\n" \
19735 "3: mov %3,%0\n" \
19736@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19737 ".previous\n" \
19738 _ASM_EXTABLE(1b, 3b) \
19739 : "=r"(err) \
19740- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19741+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19742+ pax_close_userland(); \
19743+} while (0)
19744
19745 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19746- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19747+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19748 "2:\n" \
19749 _ASM_EXTABLE_EX(1b, 2b) \
19750 : : ltype(x), "m" (__m(addr)))
19751@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19752 */
19753 #define uaccess_try do { \
19754 current_thread_info()->uaccess_err = 0; \
19755+ pax_open_userland(); \
19756 stac(); \
19757 barrier();
19758
19759 #define uaccess_catch(err) \
19760 clac(); \
19761+ pax_close_userland(); \
19762 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19763 } while (0)
19764
19765@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19766 * On error, the variable @x is set to zero.
19767 */
19768
19769+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19770+#define __get_user(x, ptr) get_user((x), (ptr))
19771+#else
19772 #define __get_user(x, ptr) \
19773 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19774+#endif
19775
19776 /**
19777 * __put_user: - Write a simple value into user space, with less checking.
19778@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19779 * Returns zero on success, or -EFAULT on error.
19780 */
19781
19782+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19783+#define __put_user(x, ptr) put_user((x), (ptr))
19784+#else
19785 #define __put_user(x, ptr) \
19786 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19787+#endif
19788
19789 #define __get_user_unaligned __get_user
19790 #define __put_user_unaligned __put_user
19791@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19792 #define get_user_ex(x, ptr) do { \
19793 unsigned long __gue_val; \
19794 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19795- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19796+ (x) = (__typeof__(*(ptr)))__gue_val; \
19797 } while (0)
19798
19799 #define put_user_try uaccess_try
19800@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19801 extern __must_check long strnlen_user(const char __user *str, long n);
19802
19803 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19804-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19805+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19806
19807 extern void __cmpxchg_wrong_size(void)
19808 __compiletime_error("Bad argument size for cmpxchg");
19809@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19810 __typeof__(ptr) __uval = (uval); \
19811 __typeof__(*(ptr)) __old = (old); \
19812 __typeof__(*(ptr)) __new = (new); \
19813+ pax_open_userland(); \
19814 switch (size) { \
19815 case 1: \
19816 { \
19817 asm volatile("\t" ASM_STAC "\n" \
19818- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19819+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19820 "2:\t" ASM_CLAC "\n" \
19821 "\t.section .fixup, \"ax\"\n" \
19822 "3:\tmov %3, %0\n" \
19823 "\tjmp 2b\n" \
19824 "\t.previous\n" \
19825 _ASM_EXTABLE(1b, 3b) \
19826- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19827+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19828 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19829 : "memory" \
19830 ); \
19831@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19832 case 2: \
19833 { \
19834 asm volatile("\t" ASM_STAC "\n" \
19835- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19836+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19837 "2:\t" ASM_CLAC "\n" \
19838 "\t.section .fixup, \"ax\"\n" \
19839 "3:\tmov %3, %0\n" \
19840 "\tjmp 2b\n" \
19841 "\t.previous\n" \
19842 _ASM_EXTABLE(1b, 3b) \
19843- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19844+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19845 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19846 : "memory" \
19847 ); \
19848@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19849 case 4: \
19850 { \
19851 asm volatile("\t" ASM_STAC "\n" \
19852- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19853+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19854 "2:\t" ASM_CLAC "\n" \
19855 "\t.section .fixup, \"ax\"\n" \
19856 "3:\tmov %3, %0\n" \
19857 "\tjmp 2b\n" \
19858 "\t.previous\n" \
19859 _ASM_EXTABLE(1b, 3b) \
19860- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19861+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19862 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19863 : "memory" \
19864 ); \
19865@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19866 __cmpxchg_wrong_size(); \
19867 \
19868 asm volatile("\t" ASM_STAC "\n" \
19869- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19870+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19871 "2:\t" ASM_CLAC "\n" \
19872 "\t.section .fixup, \"ax\"\n" \
19873 "3:\tmov %3, %0\n" \
19874 "\tjmp 2b\n" \
19875 "\t.previous\n" \
19876 _ASM_EXTABLE(1b, 3b) \
19877- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19878+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19879 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19880 : "memory" \
19881 ); \
19882@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
19883 default: \
19884 __cmpxchg_wrong_size(); \
19885 } \
19886+ pax_close_userland(); \
19887 *__uval = __old; \
19888 __ret; \
19889 })
19890@@ -636,17 +715,6 @@ extern struct movsl_mask {
19891
19892 #define ARCH_HAS_NOCACHE_UACCESS 1
19893
19894-#ifdef CONFIG_X86_32
19895-# include <asm/uaccess_32.h>
19896-#else
19897-# include <asm/uaccess_64.h>
19898-#endif
19899-
19900-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19901- unsigned n);
19902-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19903- unsigned n);
19904-
19905 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19906 # define copy_user_diag __compiletime_error
19907 #else
19908@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19909 extern void copy_user_diag("copy_from_user() buffer size is too small")
19910 copy_from_user_overflow(void);
19911 extern void copy_user_diag("copy_to_user() buffer size is too small")
19912-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19913+copy_to_user_overflow(void);
19914
19915 #undef copy_user_diag
19916
19917@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19918
19919 extern void
19920 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19921-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19922+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19923 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19924
19925 #else
19926@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19927
19928 #endif
19929
19930+#ifdef CONFIG_X86_32
19931+# include <asm/uaccess_32.h>
19932+#else
19933+# include <asm/uaccess_64.h>
19934+#endif
19935+
19936 static inline unsigned long __must_check
19937 copy_from_user(void *to, const void __user *from, unsigned long n)
19938 {
19939- int sz = __compiletime_object_size(to);
19940+ size_t sz = __compiletime_object_size(to);
19941
19942 might_fault();
19943
19944@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19945 * case, and do only runtime checking for non-constant sizes.
19946 */
19947
19948- if (likely(sz < 0 || sz >= n))
19949- n = _copy_from_user(to, from, n);
19950- else if(__builtin_constant_p(n))
19951- copy_from_user_overflow();
19952- else
19953- __copy_from_user_overflow(sz, n);
19954+ if (likely(sz != (size_t)-1 && sz < n)) {
19955+		if (__builtin_constant_p(n))
19956+ copy_from_user_overflow();
19957+ else
19958+ __copy_from_user_overflow(sz, n);
19959+ } else if (access_ok(VERIFY_READ, from, n))
19960+ n = __copy_from_user(to, from, n);
19961+ else if ((long)n > 0)
19962+ memset(to, 0, n);
19963
19964 return n;
19965 }
19966@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19967 static inline unsigned long __must_check
19968 copy_to_user(void __user *to, const void *from, unsigned long n)
19969 {
19970- int sz = __compiletime_object_size(from);
19971+ size_t sz = __compiletime_object_size(from);
19972
19973 might_fault();
19974
19975 /* See the comment in copy_from_user() above. */
19976- if (likely(sz < 0 || sz >= n))
19977- n = _copy_to_user(to, from, n);
19978- else if(__builtin_constant_p(n))
19979- copy_to_user_overflow();
19980- else
19981- __copy_to_user_overflow(sz, n);
19982+ if (likely(sz != (size_t)-1 && sz < n)) {
19983+ if(__builtin_constant_p(n))
19984+ copy_to_user_overflow();
19985+ else
19986+ __copy_to_user_overflow(sz, n);
19987+ } else if (access_ok(VERIFY_WRITE, to, n))
19988+ n = __copy_to_user(to, from, n);
19989
19990 return n;
19991 }
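
The rewritten copy_from_user()/copy_to_user() above replace the old "sz < 0 means unknown" convention with an explicit (size_t)-1 sentinel and refuse the copy outright when the compiler can prove the kernel buffer is too small, rather than reporting the overflow and copying anyway. A minimal userspace sketch of that check, built on the GCC/Clang __builtin_object_size() builtin that __compiletime_object_size() wraps; checked_copy() is an invented name for illustration:

#include <stdio.h>
#include <string.h>

/* Sketch only: __builtin_object_size(ptr, 0) evaluates to (size_t)-1
 * when the object size is unknown, so the overflow branch fires only
 * for provably-too-small destinations, the same
 * sz != (size_t)-1 && sz < n test the patch adds. */
static inline size_t checked_copy(void *to, const void *from, size_t n)
{
	size_t sz = __builtin_object_size(to, 0);

	if (sz != (size_t)-1 && sz < n)
		return n;	/* reject: all n bytes remain uncopied */
	memcpy(to, from, n);
	return 0;		/* everything copied */
}

int main(void)
{
	char buf[16];
	const char msg[] = "hello";

	printf("uncopied: %zu\n", checked_copy(buf, msg, sizeof(msg)));
	return 0;
}

The fallback branches change shape too: instead of calling _copy_from_user()/_copy_to_user(), the inlined versions do the access_ok() check themselves and zero the destination on a failed read, which is why the _copy_* prototypes were dropped earlier in this hunk.
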
19992diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19993index 3c03a5d..edb68ae 100644
19994--- a/arch/x86/include/asm/uaccess_32.h
19995+++ b/arch/x86/include/asm/uaccess_32.h
19996@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19997 * anything, so this is accurate.
19998 */
19999
20000-static __always_inline unsigned long __must_check
20001+static __always_inline __size_overflow(3) unsigned long __must_check
20002 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20003 {
20004+ if ((long)n < 0)
20005+ return n;
20006+
20007+ check_object_size(from, n, true);
20008+
20009 if (__builtin_constant_p(n)) {
20010 unsigned long ret;
20011
20012@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20013 __copy_to_user(void __user *to, const void *from, unsigned long n)
20014 {
20015 might_fault();
20016+
20017 return __copy_to_user_inatomic(to, from, n);
20018 }
20019
20020-static __always_inline unsigned long
20021+static __always_inline __size_overflow(3) unsigned long
20022 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20023 {
20024+ if ((long)n < 0)
20025+ return n;
20026+
20027 /* Avoid zeroing the tail if the copy fails..
20028 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20029 * but as the zeroing behaviour is only significant when n is not
20030@@ -137,6 +146,12 @@ static __always_inline unsigned long
20031 __copy_from_user(void *to, const void __user *from, unsigned long n)
20032 {
20033 might_fault();
20034+
20035+ if ((long)n < 0)
20036+ return n;
20037+
20038+ check_object_size(to, n, false);
20039+
20040 if (__builtin_constant_p(n)) {
20041 unsigned long ret;
20042
20043@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20044 const void __user *from, unsigned long n)
20045 {
20046 might_fault();
20047+
20048+ if ((long)n < 0)
20049+ return n;
20050+
20051 if (__builtin_constant_p(n)) {
20052 unsigned long ret;
20053
20054@@ -181,7 +200,10 @@ static __always_inline unsigned long
20055 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20056 unsigned long n)
20057 {
20058- return __copy_from_user_ll_nocache_nozero(to, from, n);
20059+ if ((long)n < 0)
20060+ return n;
20061+
20062+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20063 }
20064
20065 #endif /* _ASM_X86_UACCESS_32_H */
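
Every 32-bit helper above gains an early "(long)n < 0" bail-out, and __copy_to_user_inatomic()/__copy_from_user() additionally call check_object_size() for the PAX_USERCOPY slab checks. The sign test matters because a negative length, say an error code accidentally used as a count, converts to an enormous unsigned value. A standalone sketch of that conversion hazard; guarded_len() is an invented name:

#include <stdio.h>

/* Sketch: a signed -1 becomes ULONG_MAX on conversion to unsigned
 * long; the (long)n < 0 test added above rejects any length with the
 * top bit set before a single byte is copied. */
static unsigned long guarded_len(unsigned long n)
{
	if ((long)n < 0)
		return 0;	/* implausibly huge; refuse */
	return n;
}

int main(void)
{
	int err = -1;	/* e.g. an error code misused as a length */

	printf("raw:     %lu\n", (unsigned long)err);
	printf("guarded: %lu\n", guarded_len((unsigned long)err));
	return 0;
}
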
20066diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20067index f2f9b39..2ae1bf8 100644
20068--- a/arch/x86/include/asm/uaccess_64.h
20069+++ b/arch/x86/include/asm/uaccess_64.h
20070@@ -10,6 +10,9 @@
20071 #include <asm/alternative.h>
20072 #include <asm/cpufeature.h>
20073 #include <asm/page.h>
20074+#include <asm/pgtable.h>
20075+
20076+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20077
20078 /*
20079 * Copy To/From Userspace
20080@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20081 __must_check unsigned long
20082 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20083
20084-static __always_inline __must_check unsigned long
20085-copy_user_generic(void *to, const void *from, unsigned len)
20086+static __always_inline __must_check __size_overflow(3) unsigned long
20087+copy_user_generic(void *to, const void *from, unsigned long len)
20088 {
20089 unsigned ret;
20090
20091@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20092 }
20093
20094 __must_check unsigned long
20095-copy_in_user(void __user *to, const void __user *from, unsigned len);
20096+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20097
20098 static __always_inline __must_check
20099-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20100+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20101 {
20102- int ret = 0;
20103+ size_t sz = __compiletime_object_size(dst);
20104+ unsigned ret = 0;
20105+
20106+ if (size > INT_MAX)
20107+ return size;
20108+
20109+ check_object_size(dst, size, false);
20110+
20111+#ifdef CONFIG_PAX_MEMORY_UDEREF
20112+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20113+ return size;
20114+#endif
20115+
20116+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20117+		if (__builtin_constant_p(size))
20118+ copy_from_user_overflow();
20119+ else
20120+ __copy_from_user_overflow(sz, size);
20121+ return size;
20122+ }
20123
20124 if (!__builtin_constant_p(size))
20125- return copy_user_generic(dst, (__force void *)src, size);
20126+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20127 switch (size) {
20128- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20129+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20130 ret, "b", "b", "=q", 1);
20131 return ret;
20132- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20133+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20134 ret, "w", "w", "=r", 2);
20135 return ret;
20136- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20137+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20138 ret, "l", "k", "=r", 4);
20139 return ret;
20140- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20141+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20142 ret, "q", "", "=r", 8);
20143 return ret;
20144 case 10:
20145- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20146+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20147 ret, "q", "", "=r", 10);
20148 if (unlikely(ret))
20149 return ret;
20150 __get_user_asm(*(u16 *)(8 + (char *)dst),
20151- (u16 __user *)(8 + (char __user *)src),
20152+ (const u16 __user *)(8 + (const char __user *)src),
20153 ret, "w", "w", "=r", 2);
20154 return ret;
20155 case 16:
20156- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20157+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20158 ret, "q", "", "=r", 16);
20159 if (unlikely(ret))
20160 return ret;
20161 __get_user_asm(*(u64 *)(8 + (char *)dst),
20162- (u64 __user *)(8 + (char __user *)src),
20163+ (const u64 __user *)(8 + (const char __user *)src),
20164 ret, "q", "", "=r", 8);
20165 return ret;
20166 default:
20167- return copy_user_generic(dst, (__force void *)src, size);
20168+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20169 }
20170 }
20171
20172 static __always_inline __must_check
20173-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20174+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20175 {
20176 might_fault();
20177 return __copy_from_user_nocheck(dst, src, size);
20178 }
20179
20180 static __always_inline __must_check
20181-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20182+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20183 {
20184- int ret = 0;
20185+ size_t sz = __compiletime_object_size(src);
20186+ unsigned ret = 0;
20187+
20188+ if (size > INT_MAX)
20189+ return size;
20190+
20191+ check_object_size(src, size, true);
20192+
20193+#ifdef CONFIG_PAX_MEMORY_UDEREF
20194+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20195+ return size;
20196+#endif
20197+
20198+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20199+		if (__builtin_constant_p(size))
20200+ copy_to_user_overflow();
20201+ else
20202+ __copy_to_user_overflow(sz, size);
20203+ return size;
20204+ }
20205
20206 if (!__builtin_constant_p(size))
20207- return copy_user_generic((__force void *)dst, src, size);
20208+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20209 switch (size) {
20210- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20211+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20212 ret, "b", "b", "iq", 1);
20213 return ret;
20214- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20215+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20216 ret, "w", "w", "ir", 2);
20217 return ret;
20218- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20219+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20220 ret, "l", "k", "ir", 4);
20221 return ret;
20222- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20223+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20224 ret, "q", "", "er", 8);
20225 return ret;
20226 case 10:
20227- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20228+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20229 ret, "q", "", "er", 10);
20230 if (unlikely(ret))
20231 return ret;
20232 asm("":::"memory");
20233- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20234+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20235 ret, "w", "w", "ir", 2);
20236 return ret;
20237 case 16:
20238- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20239+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20240 ret, "q", "", "er", 16);
20241 if (unlikely(ret))
20242 return ret;
20243 asm("":::"memory");
20244- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20245+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20246 ret, "q", "", "er", 8);
20247 return ret;
20248 default:
20249- return copy_user_generic((__force void *)dst, src, size);
20250+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20251 }
20252 }
20253
20254 static __always_inline __must_check
20255-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20256+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20257 {
20258 might_fault();
20259 return __copy_to_user_nocheck(dst, src, size);
20260 }
20261
20262 static __always_inline __must_check
20263-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20264+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20265 {
20266- int ret = 0;
20267+ unsigned ret = 0;
20268
20269 might_fault();
20270+
20271+ if (size > INT_MAX)
20272+ return size;
20273+
20274+#ifdef CONFIG_PAX_MEMORY_UDEREF
20275+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20276+ return size;
20277+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20278+ return size;
20279+#endif
20280+
20281 if (!__builtin_constant_p(size))
20282- return copy_user_generic((__force void *)dst,
20283- (__force void *)src, size);
20284+ return copy_user_generic((__force_kernel void *)____m(dst),
20285+ (__force_kernel const void *)____m(src), size);
20286 switch (size) {
20287 case 1: {
20288 u8 tmp;
20289- __get_user_asm(tmp, (u8 __user *)src,
20290+ __get_user_asm(tmp, (const u8 __user *)src,
20291 ret, "b", "b", "=q", 1);
20292 if (likely(!ret))
20293 __put_user_asm(tmp, (u8 __user *)dst,
20294@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20295 }
20296 case 2: {
20297 u16 tmp;
20298- __get_user_asm(tmp, (u16 __user *)src,
20299+ __get_user_asm(tmp, (const u16 __user *)src,
20300 ret, "w", "w", "=r", 2);
20301 if (likely(!ret))
20302 __put_user_asm(tmp, (u16 __user *)dst,
20303@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20304
20305 case 4: {
20306 u32 tmp;
20307- __get_user_asm(tmp, (u32 __user *)src,
20308+ __get_user_asm(tmp, (const u32 __user *)src,
20309 ret, "l", "k", "=r", 4);
20310 if (likely(!ret))
20311 __put_user_asm(tmp, (u32 __user *)dst,
20312@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20313 }
20314 case 8: {
20315 u64 tmp;
20316- __get_user_asm(tmp, (u64 __user *)src,
20317+ __get_user_asm(tmp, (const u64 __user *)src,
20318 ret, "q", "", "=r", 8);
20319 if (likely(!ret))
20320 __put_user_asm(tmp, (u64 __user *)dst,
20321@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20322 return ret;
20323 }
20324 default:
20325- return copy_user_generic((__force void *)dst,
20326- (__force void *)src, size);
20327+ return copy_user_generic((__force_kernel void *)____m(dst),
20328+ (__force_kernel const void *)____m(src), size);
20329 }
20330 }
20331
20332-static __must_check __always_inline int
20333-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20334+static __must_check __always_inline unsigned long
20335+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20336 {
20337 return __copy_from_user_nocheck(dst, src, size);
20338 }
20339
20340-static __must_check __always_inline int
20341-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20342+static __must_check __always_inline unsigned long
20343+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20344 {
20345 return __copy_to_user_nocheck(dst, src, size);
20346 }
20347
20348-extern long __copy_user_nocache(void *dst, const void __user *src,
20349- unsigned size, int zerorest);
20350+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20351+ unsigned long size, int zerorest);
20352
20353-static inline int
20354-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20355+static inline unsigned long
20356+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20357 {
20358 might_fault();
20359+
20360+ if (size > INT_MAX)
20361+ return size;
20362+
20363+#ifdef CONFIG_PAX_MEMORY_UDEREF
20364+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20365+ return size;
20366+#endif
20367+
20368 return __copy_user_nocache(dst, src, size, 1);
20369 }
20370
20371-static inline int
20372+static inline unsigned long
20373 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20374- unsigned size)
20375+ unsigned long size)
20376 {
20377+ if (size > INT_MAX)
20378+ return size;
20379+
20380+#ifdef CONFIG_PAX_MEMORY_UDEREF
20381+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20382+ return size;
20383+#endif
20384+
20385 return __copy_user_nocache(dst, src, size, 0);
20386 }
20387
20388 unsigned long
20389-copy_user_handle_tail(char *to, char *from, unsigned len);
20390+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
20391
20392 #endif /* _ASM_X86_UACCESS_64_H */
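
The 64-bit helpers keep the upstream pattern of dispatching on __builtin_constant_p(size): constant small sizes compile down to a single inlined __get_user_asm()/__put_user_asm() move, and everything else falls through to copy_user_generic(). A userspace sketch of that dispatch; copy_small() and copy_generic() are invented stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned long copy_generic(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);	/* stand-in for copy_user_generic() */
	return 0;
}

/* Sketch: when n is a compile-time constant the switch collapses to
 * one fixed-width assignment; otherwise the generic copy runs. */
static inline unsigned long copy_small(void *dst, const void *src,
				       unsigned long n)
{
	if (!__builtin_constant_p(n))
		return copy_generic(dst, src, n);
	switch (n) {
	case 1: *(uint8_t  *)dst = *(const uint8_t  *)src; return 0;
	case 2: *(uint16_t *)dst = *(const uint16_t *)src; return 0;
	case 4: *(uint32_t *)dst = *(const uint32_t *)src; return 0;
	case 8: *(uint64_t *)dst = *(const uint64_t *)src; return 0;
	default: return copy_generic(dst, src, n);
	}
}

int main(void)
{
	uint64_t a = 0x1122334455667788ull, b = 0;

	copy_small(&b, &a, sizeof(b));	/* constant size: inlined move */
	printf("%llx\n", (unsigned long long)b);
	return 0;
}

What the patch layers on top is uniform across the file: widen the size parameters to unsigned long, cap them at INT_MAX, verify the access under UDEREF, and run the same compile-time object-size check as the checked entry points.
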
20393diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20394index 5b238981..77fdd78 100644
20395--- a/arch/x86/include/asm/word-at-a-time.h
20396+++ b/arch/x86/include/asm/word-at-a-time.h
20397@@ -11,7 +11,7 @@
20398 * and shift, for example.
20399 */
20400 struct word_at_a_time {
20401- const unsigned long one_bits, high_bits;
20402+ unsigned long one_bits, high_bits;
20403 };
20404
20405 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20406diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20407index f58a9c7..dc378042a 100644
20408--- a/arch/x86/include/asm/x86_init.h
20409+++ b/arch/x86/include/asm/x86_init.h
20410@@ -129,7 +129,7 @@ struct x86_init_ops {
20411 struct x86_init_timers timers;
20412 struct x86_init_iommu iommu;
20413 struct x86_init_pci pci;
20414-};
20415+} __no_const;
20416
20417 /**
20418 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20419@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20420 void (*setup_percpu_clockev)(void);
20421 void (*early_percpu_clock_init)(void);
20422 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20423-};
20424+} __no_const;
20425
20426 struct timespec;
20427
20428@@ -168,7 +168,7 @@ struct x86_platform_ops {
20429 void (*save_sched_clock_state)(void);
20430 void (*restore_sched_clock_state)(void);
20431 void (*apic_post_init)(void);
20432-};
20433+} __no_const;
20434
20435 struct pci_dev;
20436 struct msi_msg;
20437@@ -182,7 +182,7 @@ struct x86_msi_ops {
20438 void (*teardown_msi_irqs)(struct pci_dev *dev);
20439 void (*restore_msi_irqs)(struct pci_dev *dev);
20440 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20441-};
20442+} __no_const;
20443
20444 struct IO_APIC_route_entry;
20445 struct io_apic_irq_attr;
20446@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20447 unsigned int destination, int vector,
20448 struct io_apic_irq_attr *attr);
20449 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20450-};
20451+} __no_const;
20452
20453 extern struct x86_init_ops x86_init;
20454 extern struct x86_cpuinit_ops x86_cpuinit;
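
Tagging the x86_*_ops structures __no_const (and, further down, the apic instances __read_only) is the constify side of PaX/grsecurity: tables of function pointers that are never legitimately rewritten get placed in read-only memory so they cannot be hijacked, while __no_const marks the few that must stay writable. The plain-C analogue is simply declaring the table const; struct ops and real_hello below are illustrative names:

#include <stdio.h>

struct ops {
	void (*hello)(void);
};

static void real_hello(void)
{
	puts("hello");
}

/* Sketch: a const ops table lands in .rodata, so an attempt to
 * overwrite default_ops.hello at runtime faults instead of silently
 * redirecting the indirect call. */
static const struct ops default_ops = {
	.hello = real_hello,
};

int main(void)
{
	default_ops.hello();
	return 0;
}
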
20455diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20456index 358dcd3..23c0bf1 100644
20457--- a/arch/x86/include/asm/xen/page.h
20458+++ b/arch/x86/include/asm/xen/page.h
20459@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20460 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20461 * cases needing an extended handling.
20462 */
20463-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20464+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20465 {
20466 unsigned long mfn;
20467
20468diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20469index c9a6d68..cb57f42 100644
20470--- a/arch/x86/include/asm/xsave.h
20471+++ b/arch/x86/include/asm/xsave.h
20472@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20473 if (unlikely(err))
20474 return -EFAULT;
20475
20476+ pax_open_userland();
20477 __asm__ __volatile__(ASM_STAC "\n"
20478- "1:"XSAVE"\n"
20479+ "1:"
20480+ __copyuser_seg
20481+ XSAVE"\n"
20482 "2: " ASM_CLAC "\n"
20483 xstate_fault
20484 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20485 : "memory");
20486+ pax_close_userland();
20487 return err;
20488 }
20489
20490@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20491 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20492 {
20493 int err = 0;
20494- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20495+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20496 u32 lmask = mask;
20497 u32 hmask = mask >> 32;
20498
20499+ pax_open_userland();
20500 __asm__ __volatile__(ASM_STAC "\n"
20501- "1:"XRSTOR"\n"
20502+ "1:"
20503+ __copyuser_seg
20504+ XRSTOR"\n"
20505 "2: " ASM_CLAC "\n"
20506 xstate_fault
20507 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20508 : "memory"); /* memory required? */
20509+ pax_close_userland();
20510 return err;
20511 }
20512
20513diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20514index d993e33..8db1b18 100644
20515--- a/arch/x86/include/uapi/asm/e820.h
20516+++ b/arch/x86/include/uapi/asm/e820.h
20517@@ -58,7 +58,7 @@ struct e820map {
20518 #define ISA_START_ADDRESS 0xa0000
20519 #define ISA_END_ADDRESS 0x100000
20520
20521-#define BIOS_BEGIN 0x000a0000
20522+#define BIOS_BEGIN 0x000c0000
20523 #define BIOS_END 0x00100000
20524
20525 #define BIOS_ROM_BASE 0xffe00000
20526diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20527index 7b0a55a..ad115bf 100644
20528--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20529+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20530@@ -49,7 +49,6 @@
20531 #define EFLAGS 144
20532 #define RSP 152
20533 #define SS 160
20534-#define ARGOFFSET R11
20535 #endif /* __ASSEMBLY__ */
20536
20537 /* top of stack page */
20538diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20539index cdb1b70..426434c 100644
20540--- a/arch/x86/kernel/Makefile
20541+++ b/arch/x86/kernel/Makefile
20542@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20543 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20544 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20545 obj-y += probe_roms.o
20546-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20547+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20548 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20549 obj-$(CONFIG_X86_64) += mcount_64.o
20550 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20551diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20552index 803b684..68c64f1 100644
20553--- a/arch/x86/kernel/acpi/boot.c
20554+++ b/arch/x86/kernel/acpi/boot.c
20555@@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
20556 * If your system is blacklisted here, but you find that acpi=force
20557 * works for you, please contact linux-acpi@vger.kernel.org
20558 */
20559-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20560+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20561 /*
20562 * Boxes that need ACPI disabled
20563 */
20564@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20565 };
20566
20567 /* second table for DMI checks that should run after early-quirks */
20568-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20569+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20570 /*
20571 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20572 * which includes some code which overrides all temperature
20573diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20574index d1daead..acd77e2 100644
20575--- a/arch/x86/kernel/acpi/sleep.c
20576+++ b/arch/x86/kernel/acpi/sleep.c
20577@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20578 #else /* CONFIG_64BIT */
20579 #ifdef CONFIG_SMP
20580 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20581+
20582+ pax_open_kernel();
20583 early_gdt_descr.address =
20584 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20585+ pax_close_kernel();
20586+
20587 initial_gs = per_cpu_offset(smp_processor_id());
20588 #endif
20589 initial_code = (unsigned long)wakeup_long64;
20590diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20591index 665c6b7..eae4d56 100644
20592--- a/arch/x86/kernel/acpi/wakeup_32.S
20593+++ b/arch/x86/kernel/acpi/wakeup_32.S
20594@@ -29,13 +29,11 @@ wakeup_pmode_return:
20595 # and restore the stack ... but you need gdt for this to work
20596 movl saved_context_esp, %esp
20597
20598- movl %cs:saved_magic, %eax
20599- cmpl $0x12345678, %eax
20600+ cmpl $0x12345678, saved_magic
20601 jne bogus_magic
20602
20603 # jump to place where we left off
20604- movl saved_eip, %eax
20605- jmp *%eax
20606+ jmp *(saved_eip)
20607
20608 bogus_magic:
20609 jmp bogus_magic
20610diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20611index 703130f..27a155d 100644
20612--- a/arch/x86/kernel/alternative.c
20613+++ b/arch/x86/kernel/alternative.c
20614@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20615 */
20616 for (a = start; a < end; a++) {
20617 instr = (u8 *)&a->instr_offset + a->instr_offset;
20618+
20619+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20620+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20621+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20622+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20623+#endif
20624+
20625 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20626 BUG_ON(a->replacementlen > a->instrlen);
20627 BUG_ON(a->instrlen > sizeof(insnbuf));
20628@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20629 add_nops(insnbuf + a->replacementlen,
20630 a->instrlen - a->replacementlen);
20631
20632+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20633+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20634+ instr = ktva_ktla(instr);
20635+#endif
20636+
20637 text_poke_early(instr, insnbuf, a->instrlen);
20638 }
20639 }
20640@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20641 for (poff = start; poff < end; poff++) {
20642 u8 *ptr = (u8 *)poff + *poff;
20643
20644+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20645+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20646+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20647+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20648+#endif
20649+
20650 if (!*poff || ptr < text || ptr >= text_end)
20651 continue;
20652 /* turn DS segment override prefix into lock prefix */
20653- if (*ptr == 0x3e)
20654+ if (*ktla_ktva(ptr) == 0x3e)
20655 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20656 }
20657 mutex_unlock(&text_mutex);
20658@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20659 for (poff = start; poff < end; poff++) {
20660 u8 *ptr = (u8 *)poff + *poff;
20661
20662+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20663+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20664+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20665+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20666+#endif
20667+
20668 if (!*poff || ptr < text || ptr >= text_end)
20669 continue;
20670 /* turn lock prefix into DS segment override prefix */
20671- if (*ptr == 0xf0)
20672+ if (*ktla_ktva(ptr) == 0xf0)
20673 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20674 }
20675 mutex_unlock(&text_mutex);
20676@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20677
20678 BUG_ON(p->len > MAX_PATCH_LEN);
20679 /* prep the buffer with the original instructions */
20680- memcpy(insnbuf, p->instr, p->len);
20681+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20682 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20683 (unsigned long)p->instr, p->len);
20684
20685@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20686 if (!uniproc_patched || num_possible_cpus() == 1)
20687 free_init_pages("SMP alternatives",
20688 (unsigned long)__smp_locks,
20689- (unsigned long)__smp_locks_end);
20690+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20691 #endif
20692
20693 apply_paravirt(__parainstructions, __parainstructions_end);
20694@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20695 * instructions. And on the local CPU you need to be protected again NMI or MCE
20696 * handlers seeing an inconsistent instruction while you patch.
20697 */
20698-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20699+void *__kprobes text_poke_early(void *addr, const void *opcode,
20700 size_t len)
20701 {
20702 unsigned long flags;
20703 local_irq_save(flags);
20704- memcpy(addr, opcode, len);
20705+
20706+ pax_open_kernel();
20707+ memcpy(ktla_ktva(addr), opcode, len);
20708 sync_core();
20709+ pax_close_kernel();
20710+
20711 local_irq_restore(flags);
20712 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20713 that causes hangs on some VIA CPUs. */
20714@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20715 */
20716 void *text_poke(void *addr, const void *opcode, size_t len)
20717 {
20718- unsigned long flags;
20719- char *vaddr;
20720+ unsigned char *vaddr = ktla_ktva(addr);
20721 struct page *pages[2];
20722- int i;
20723+ size_t i;
20724
20725 if (!core_kernel_text((unsigned long)addr)) {
20726- pages[0] = vmalloc_to_page(addr);
20727- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20728+ pages[0] = vmalloc_to_page(vaddr);
20729+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20730 } else {
20731- pages[0] = virt_to_page(addr);
20732+ pages[0] = virt_to_page(vaddr);
20733 WARN_ON(!PageReserved(pages[0]));
20734- pages[1] = virt_to_page(addr + PAGE_SIZE);
20735+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20736 }
20737 BUG_ON(!pages[0]);
20738- local_irq_save(flags);
20739- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20740- if (pages[1])
20741- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20742- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20743- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20744- clear_fixmap(FIX_TEXT_POKE0);
20745- if (pages[1])
20746- clear_fixmap(FIX_TEXT_POKE1);
20747- local_flush_tlb();
20748- sync_core();
20749- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20750- that causes hangs on some VIA CPUs. */
20751+ text_poke_early(addr, opcode, len);
20752 for (i = 0; i < len; i++)
20753- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20754- local_irq_restore(flags);
20755+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20756 return addr;
20757 }
20758
20759@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20760 if (likely(!bp_patching_in_progress))
20761 return 0;
20762
20763- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20764+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20765 return 0;
20766
20767 /* set up the specified breakpoint handler */
20768@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20769 */
20770 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20771 {
20772- unsigned char int3 = 0xcc;
20773+ const unsigned char int3 = 0xcc;
20774
20775 bp_int3_handler = handler;
20776 bp_int3_addr = (u8 *)addr + sizeof(int3);
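
The text_poke() rewrite above drops the fixmap remapping entirely: it translates the target through ktla_ktva() to the writable kernel alias, defers to text_poke_early() (which now opens a pax_open_kernel() window), and then re-reads every byte to confirm the patch took. A userspace sketch of that write-then-verify step; poke_and_verify() is an invented name:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Sketch: after patching, read the bytes back and assert they match,
 * mirroring the BUG_ON() loop text_poke() now ends with. */
static void poke_and_verify(unsigned char *addr,
			    const unsigned char *opcode, size_t len)
{
	memcpy(addr, opcode, len);
	for (size_t i = 0; i < len; i++)
		assert(addr[i] == opcode[i]);
}

int main(void)
{
	unsigned char text[4] = { 0x90, 0x90, 0x90, 0x90 };	/* NOPs */
	const unsigned char lock = 0xf0;			/* LOCK prefix */

	poke_and_verify(text, &lock, 1);
	printf("%02x\n", text[0]);
	return 0;
}
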
20777diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20778index ad3639a..bd4253c 100644
20779--- a/arch/x86/kernel/apic/apic.c
20780+++ b/arch/x86/kernel/apic/apic.c
20781@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20782 /*
20783 * Debug level, exported for io_apic.c
20784 */
20785-unsigned int apic_verbosity;
20786+int apic_verbosity;
20787
20788 int pic_mode;
20789
20790@@ -1918,7 +1918,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20791 apic_write(APIC_ESR, 0);
20792 v = apic_read(APIC_ESR);
20793 ack_APIC_irq();
20794- atomic_inc(&irq_err_count);
20795+ atomic_inc_unchecked(&irq_err_count);
20796
20797 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20798 smp_processor_id(), v);
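
irq_err_count here (and irq_mis_count below) switch to atomic_unchecked_t: under PaX's REFCOUNT hardening every atomic_inc() traps on overflow, so pure statistics counters that may legitimately wrap are opted out through the _unchecked variants. atomic_unchecked_t is a grsecurity type, not upstream; the closest portable sketch is a relaxed C11 counter whose wraparound is harmless:

#include <stdatomic.h>
#include <stdio.h>

/* Sketch: a wrap-tolerant statistics counter. Nothing is freed or
 * dereferenced based on its value, which is exactly why the patch
 * exempts it from REFCOUNT overflow checking. */
static atomic_ulong irq_err_count;

static void note_irq_error(void)
{
	atomic_fetch_add_explicit(&irq_err_count, 1, memory_order_relaxed);
}

int main(void)
{
	note_irq_error();
	printf("errors: %lu\n", atomic_load(&irq_err_count));
	return 0;
}
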
20799diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20800index de918c4..32eed23 100644
20801--- a/arch/x86/kernel/apic/apic_flat_64.c
20802+++ b/arch/x86/kernel/apic/apic_flat_64.c
20803@@ -154,7 +154,7 @@ static int flat_probe(void)
20804 return 1;
20805 }
20806
20807-static struct apic apic_flat = {
20808+static struct apic apic_flat __read_only = {
20809 .name = "flat",
20810 .probe = flat_probe,
20811 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20812@@ -260,7 +260,7 @@ static int physflat_probe(void)
20813 return 0;
20814 }
20815
20816-static struct apic apic_physflat = {
20817+static struct apic apic_physflat __read_only = {
20818
20819 .name = "physical flat",
20820 .probe = physflat_probe,
20821diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20822index b205cdb..d8503ff 100644
20823--- a/arch/x86/kernel/apic/apic_noop.c
20824+++ b/arch/x86/kernel/apic/apic_noop.c
20825@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20826 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20827 }
20828
20829-struct apic apic_noop = {
20830+struct apic apic_noop __read_only = {
20831 .name = "noop",
20832 .probe = noop_probe,
20833 .acpi_madt_oem_check = NULL,
20834diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20835index c4a8d63..fe893ac 100644
20836--- a/arch/x86/kernel/apic/bigsmp_32.c
20837+++ b/arch/x86/kernel/apic/bigsmp_32.c
20838@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20839 return dmi_bigsmp;
20840 }
20841
20842-static struct apic apic_bigsmp = {
20843+static struct apic apic_bigsmp __read_only = {
20844
20845 .name = "bigsmp",
20846 .probe = probe_bigsmp,
20847diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20848index f4dc246..fbab133 100644
20849--- a/arch/x86/kernel/apic/io_apic.c
20850+++ b/arch/x86/kernel/apic/io_apic.c
20851@@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20852 return ret;
20853 }
20854
20855-atomic_t irq_mis_count;
20856+atomic_unchecked_t irq_mis_count;
20857
20858 #ifdef CONFIG_GENERIC_PENDING_IRQ
20859 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20860@@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
20861 * at the cpu.
20862 */
20863 if (!(v & (1 << (i & 0x1f)))) {
20864- atomic_inc(&irq_mis_count);
20865+ atomic_inc_unchecked(&irq_mis_count);
20866
20867 eoi_ioapic_irq(irq, cfg);
20868 }
20869@@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
20870 ioapic_irqd_unmask(data, cfg, masked);
20871 }
20872
20873-static struct irq_chip ioapic_chip __read_mostly = {
20874+static struct irq_chip ioapic_chip = {
20875 .name = "IO-APIC",
20876 .irq_startup = startup_ioapic_irq,
20877 .irq_mask = mask_ioapic_irq,
20878@@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
20879 ack_APIC_irq();
20880 }
20881
20882-static struct irq_chip lapic_chip __read_mostly = {
20883+static struct irq_chip lapic_chip = {
20884 .name = "local-APIC",
20885 .irq_mask = mask_lapic_irq,
20886 .irq_unmask = unmask_lapic_irq,
20887diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20888index bda4886..f9c7195 100644
20889--- a/arch/x86/kernel/apic/probe_32.c
20890+++ b/arch/x86/kernel/apic/probe_32.c
20891@@ -72,7 +72,7 @@ static int probe_default(void)
20892 return 1;
20893 }
20894
20895-static struct apic apic_default = {
20896+static struct apic apic_default __read_only = {
20897
20898 .name = "default",
20899 .probe = probe_default,
20900diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20901index 6cedd79..023ff8e 100644
20902--- a/arch/x86/kernel/apic/vector.c
20903+++ b/arch/x86/kernel/apic/vector.c
20904@@ -21,7 +21,7 @@
20905
20906 static DEFINE_RAW_SPINLOCK(vector_lock);
20907
20908-void lock_vector_lock(void)
20909+void lock_vector_lock(void) __acquires(vector_lock)
20910 {
20911 	/* Used so that the online set of cpus does not change
20912 * during assign_irq_vector.
20913@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20914 raw_spin_lock(&vector_lock);
20915 }
20916
20917-void unlock_vector_lock(void)
20918+void unlock_vector_lock(void) __releases(vector_lock)
20919 {
20920 raw_spin_unlock(&vector_lock);
20921 }
20922diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20923index e658f21..b695a1a 100644
20924--- a/arch/x86/kernel/apic/x2apic_cluster.c
20925+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20926@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20927 return notifier_from_errno(err);
20928 }
20929
20930-static struct notifier_block __refdata x2apic_cpu_notifier = {
20931+static struct notifier_block x2apic_cpu_notifier = {
20932 .notifier_call = update_clusterinfo,
20933 };
20934
20935@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20936 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20937 }
20938
20939-static struct apic apic_x2apic_cluster = {
20940+static struct apic apic_x2apic_cluster __read_only = {
20941
20942 .name = "cluster x2apic",
20943 .probe = x2apic_cluster_probe,
20944diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20945index 6fae733..5ca17af 100644
20946--- a/arch/x86/kernel/apic/x2apic_phys.c
20947+++ b/arch/x86/kernel/apic/x2apic_phys.c
20948@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20949 return apic == &apic_x2apic_phys;
20950 }
20951
20952-static struct apic apic_x2apic_phys = {
20953+static struct apic apic_x2apic_phys __read_only = {
20954
20955 .name = "physical x2apic",
20956 .probe = x2apic_phys_probe,
20957diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20958index 8e9dcfd..c61b3e4 100644
20959--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20960+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20961@@ -348,7 +348,7 @@ static int uv_probe(void)
20962 return apic == &apic_x2apic_uv_x;
20963 }
20964
20965-static struct apic __refdata apic_x2apic_uv_x = {
20966+static struct apic apic_x2apic_uv_x __read_only = {
20967
20968 .name = "UV large system",
20969 .probe = uv_probe,
20970diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20971index 927ec92..de68f32 100644
20972--- a/arch/x86/kernel/apm_32.c
20973+++ b/arch/x86/kernel/apm_32.c
20974@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20975 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20976 * even though they are called in protected mode.
20977 */
20978-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20979+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20980 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20981
20982 static const char driver_version[] = "1.16ac"; /* no spaces */
20983@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20984 BUG_ON(cpu != 0);
20985 gdt = get_cpu_gdt_table(cpu);
20986 save_desc_40 = gdt[0x40 / 8];
20987+
20988+ pax_open_kernel();
20989 gdt[0x40 / 8] = bad_bios_desc;
20990+ pax_close_kernel();
20991
20992 apm_irq_save(flags);
20993 APM_DO_SAVE_SEGS;
20994@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
20995 &call->esi);
20996 APM_DO_RESTORE_SEGS;
20997 apm_irq_restore(flags);
20998+
20999+ pax_open_kernel();
21000 gdt[0x40 / 8] = save_desc_40;
21001+ pax_close_kernel();
21002+
21003 put_cpu();
21004
21005 return call->eax & 0xff;
21006@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21007 BUG_ON(cpu != 0);
21008 gdt = get_cpu_gdt_table(cpu);
21009 save_desc_40 = gdt[0x40 / 8];
21010+
21011+ pax_open_kernel();
21012 gdt[0x40 / 8] = bad_bios_desc;
21013+ pax_close_kernel();
21014
21015 apm_irq_save(flags);
21016 APM_DO_SAVE_SEGS;
21017@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21018 &call->eax);
21019 APM_DO_RESTORE_SEGS;
21020 apm_irq_restore(flags);
21021+
21022+ pax_open_kernel();
21023 gdt[0x40 / 8] = save_desc_40;
21024+ pax_close_kernel();
21025+
21026 put_cpu();
21027 return error;
21028 }
21029@@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
21030 return 0;
21031 }
21032
21033-static struct dmi_system_id __initdata apm_dmi_table[] = {
21034+static const struct dmi_system_id __initconst apm_dmi_table[] = {
21035 {
21036 print_if_true,
21037 KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
21038@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21039 * code to that CPU.
21040 */
21041 gdt = get_cpu_gdt_table(0);
21042+
21043+ pax_open_kernel();
21044 set_desc_base(&gdt[APM_CS >> 3],
21045 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21046 set_desc_base(&gdt[APM_CS_16 >> 3],
21047 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21048 set_desc_base(&gdt[APM_DS >> 3],
21049 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21050+ pax_close_kernel();
21051
21052 proc_create("apm", 0, NULL, &apm_file_ops);
21053
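
All the GDT stores in apm_32.c are now bracketed by pax_open_kernel()/pax_close_kernel(): under KERNEXEC the descriptor tables are read-only, so each write briefly lifts the protection, mutates the entry, and re-arms it. A userspace sketch of the same open/modify/close discipline using mprotect(); the kernel primitives toggle CR0.WP or the PTE instead, and a 4 KiB page size is assumed here:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Sketch: keep a page read-only in its normal state and make it
	 * writable only for the duration of the patch, the shape of
	 * pax_open_kernel()/write/pax_close_kernel(). */
	char *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;
	strcpy(page, "old descriptor");
	mprotect(page, 4096, PROT_READ);		/* normal state */

	mprotect(page, 4096, PROT_READ | PROT_WRITE);	/* "open" */
	strcpy(page, "new descriptor");
	mprotect(page, 4096, PROT_READ);		/* "close" */

	puts(page);
	return 0;
}
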
21054diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21055index 9f6b934..cf5ffb3 100644
21056--- a/arch/x86/kernel/asm-offsets.c
21057+++ b/arch/x86/kernel/asm-offsets.c
21058@@ -32,6 +32,8 @@ void common(void) {
21059 OFFSET(TI_flags, thread_info, flags);
21060 OFFSET(TI_status, thread_info, status);
21061 OFFSET(TI_addr_limit, thread_info, addr_limit);
21062+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21063+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21064
21065 BLANK();
21066 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21067@@ -52,8 +54,26 @@ void common(void) {
21068 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21069 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21070 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21071+
21072+#ifdef CONFIG_PAX_KERNEXEC
21073+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21074 #endif
21075
21076+#ifdef CONFIG_PAX_MEMORY_UDEREF
21077+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21078+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21079+#ifdef CONFIG_X86_64
21080+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21081+#endif
21082+#endif
21083+
21084+#endif
21085+
21086+ BLANK();
21087+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21088+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21089+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21090+
21091 #ifdef CONFIG_XEN
21092 BLANK();
21093 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21094diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21095index fdcbb4d..036dd93 100644
21096--- a/arch/x86/kernel/asm-offsets_64.c
21097+++ b/arch/x86/kernel/asm-offsets_64.c
21098@@ -80,6 +80,7 @@ int main(void)
21099 BLANK();
21100 #undef ENTRY
21101
21102+ DEFINE(TSS_size, sizeof(struct tss_struct));
21103 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21104 BLANK();
21105
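
asm-offsets.c exists solely to export C structure layouts to assembly: each OFFSET()/DEFINE() becomes a #define in the generated asm-offsets.h, which the entry code uses as literal displacements. The additions above (TI_lowest_stack, TSS_size, PAGE_SIZE_asm and friends) publish the constants the PaX entry-path changes need. A sketch of the idea; the real build pipes compiler asm output through sed, but the values are plain offsetof()/sizeof() results:

#include <stddef.h>
#include <stdio.h>

struct thread_info {
	unsigned long flags;
	unsigned long addr_limit;
	unsigned long lowest_stack;	/* field added by the patch */
};

/* Sketch: emit the kind of header asm-offsets generates, so .S files
 * can write "mov TI_lowest_stack(%reg), ..." without knowing the C
 * layout. */
int main(void)
{
	printf("#define TI_flags %zu\n",
	       offsetof(struct thread_info, flags));
	printf("#define TI_addr_limit %zu\n",
	       offsetof(struct thread_info, addr_limit));
	printf("#define TI_lowest_stack %zu\n",
	       offsetof(struct thread_info, lowest_stack));
	return 0;
}
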
21106diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21107index 80091ae..0c5184f 100644
21108--- a/arch/x86/kernel/cpu/Makefile
21109+++ b/arch/x86/kernel/cpu/Makefile
21110@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21111 CFLAGS_REMOVE_perf_event.o = -pg
21112 endif
21113
21114-# Make sure load_percpu_segment has no stackprotector
21115-nostackp := $(call cc-option, -fno-stack-protector)
21116-CFLAGS_common.o := $(nostackp)
21117-
21118 obj-y := intel_cacheinfo.o scattered.o topology.o
21119 obj-y += common.o
21120 obj-y += rdrand.o
21121diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21122index a220239..607fc38 100644
21123--- a/arch/x86/kernel/cpu/amd.c
21124+++ b/arch/x86/kernel/cpu/amd.c
21125@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21126 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21127 {
21128 /* AMD errata T13 (order #21922) */
21129- if ((c->x86 == 6)) {
21130+ if (c->x86 == 6) {
21131 /* Duron Rev A0 */
21132 if (c->x86_model == 3 && c->x86_mask == 0)
21133 size = 64;
21134diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21135index 2346c95..c061472 100644
21136--- a/arch/x86/kernel/cpu/common.c
21137+++ b/arch/x86/kernel/cpu/common.c
21138@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
21139
21140 static const struct cpu_dev *this_cpu = &default_cpu;
21141
21142-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21143-#ifdef CONFIG_X86_64
21144- /*
21145- * We need valid kernel segments for data and code in long mode too
21146- * IRET will check the segment types kkeil 2000/10/28
21147- * Also sysret mandates a special GDT layout
21148- *
21149- * TLS descriptors are currently at a different place compared to i386.
21150- * Hopefully nobody expects them at a fixed place (Wine?)
21151- */
21152- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21153- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21154- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21155- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21156- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21157- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21158-#else
21159- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21160- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21161- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21162- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21163- /*
21164- * Segments used for calling PnP BIOS have byte granularity.
21165- * They code segments and data segments have fixed 64k limits,
21166- * the transfer segment sizes are set at run time.
21167- */
21168- /* 32-bit code */
21169- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21170- /* 16-bit code */
21171- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21172- /* 16-bit data */
21173- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21174- /* 16-bit data */
21175- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21176- /* 16-bit data */
21177- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21178- /*
21179- * The APM segments have byte granularity and their bases
21180- * are set at run time. All have 64k limits.
21181- */
21182- /* 32-bit code */
21183- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21184- /* 16-bit code */
21185- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21186- /* data */
21187- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21188-
21189- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21190- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21191- GDT_STACK_CANARY_INIT
21192-#endif
21193-} };
21194-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21195-
21196 static int __init x86_xsave_setup(char *s)
21197 {
21198 if (strlen(s))
21199@@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21200 }
21201 }
21202
21203+#ifdef CONFIG_X86_64
21204+static __init int setup_disable_pcid(char *arg)
21205+{
21206+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21207+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21208+
21209+#ifdef CONFIG_PAX_MEMORY_UDEREF
21210+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21211+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21212+#endif
21213+
21214+ return 1;
21215+}
21216+__setup("nopcid", setup_disable_pcid);
21217+
21218+static void setup_pcid(struct cpuinfo_x86 *c)
21219+{
21220+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21221+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21222+
21223+#ifdef CONFIG_PAX_MEMORY_UDEREF
21224+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21225+ pax_open_kernel();
21226+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21227+ pax_close_kernel();
21228+ printk("PAX: slow and weak UDEREF enabled\n");
21229+ } else
21230+ printk("PAX: UDEREF disabled\n");
21231+#endif
21232+
21233+ return;
21234+ }
21235+
21236+ printk("PAX: PCID detected\n");
21237+ cr4_set_bits(X86_CR4_PCIDE);
21238+
21239+#ifdef CONFIG_PAX_MEMORY_UDEREF
21240+ pax_open_kernel();
21241+ clone_pgd_mask = ~(pgdval_t)0UL;
21242+ pax_close_kernel();
21243+ if (pax_user_shadow_base)
21244+ printk("PAX: weak UDEREF enabled\n");
21245+ else {
21246+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21247+ printk("PAX: strong UDEREF enabled\n");
21248+ }
21249+#endif
21250+
21251+ if (cpu_has(c, X86_FEATURE_INVPCID))
21252+ printk("PAX: INVPCID detected\n");
21253+}
21254+#endif
21255+
21256 /*
21257 * Some CPU features depend on higher CPUID levels, which may not always
21258 * be available due to CPUID level capping or broken virtualization
21259@@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
21260 {
21261 struct desc_ptr gdt_descr;
21262
21263- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21264+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21265 gdt_descr.size = GDT_SIZE - 1;
21266 load_gdt(&gdt_descr);
21267 /* Reload the per-cpu base */
21268@@ -897,6 +896,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21269 setup_smep(c);
21270 setup_smap(c);
21271
21272+#ifdef CONFIG_X86_32
21273+#ifdef CONFIG_PAX_PAGEEXEC
21274+ if (!(__supported_pte_mask & _PAGE_NX))
21275+ clear_cpu_cap(c, X86_FEATURE_PSE);
21276+#endif
21277+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21278+ clear_cpu_cap(c, X86_FEATURE_SEP);
21279+#endif
21280+#endif
21281+
21282+#ifdef CONFIG_X86_64
21283+ setup_pcid(c);
21284+#endif
21285+
21286 /*
21287 * The vendor-specific functions might have changed features.
21288 * Now we do "generic changes."
21289@@ -979,7 +992,7 @@ static void syscall32_cpu_init(void)
21290 void enable_sep_cpu(void)
21291 {
21292 int cpu = get_cpu();
21293- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21294+ struct tss_struct *tss = init_tss + cpu;
21295
21296 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21297 put_cpu();
21298@@ -1117,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
21299 }
21300 __setup("clearcpuid=", setup_disablecpuid);
21301
21302+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21303+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21304+
21305 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21306- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21307+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21308 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21309
21310 #ifdef CONFIG_X86_64
21311-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21312-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21313- (unsigned long) debug_idt_table };
21314+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21315+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21316
21317 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21318 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21319@@ -1307,7 +1322,7 @@ void cpu_init(void)
21320 */
21321 load_ucode_ap();
21322
21323- t = &per_cpu(init_tss, cpu);
21324+ t = init_tss + cpu;
21325 oist = &per_cpu(orig_ist, cpu);
21326
21327 #ifdef CONFIG_NUMA
21328@@ -1339,7 +1354,6 @@ void cpu_init(void)
21329 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21330 barrier();
21331
21332- x86_configure_nx();
21333 x2apic_setup();
21334
21335 /*
21336@@ -1391,7 +1405,7 @@ void cpu_init(void)
21337 {
21338 int cpu = smp_processor_id();
21339 struct task_struct *curr = current;
21340- struct tss_struct *t = &per_cpu(init_tss, cpu);
21341+ struct tss_struct *t = init_tss + cpu;
21342 struct thread_struct *thread = &curr->thread;
21343
21344 wait_for_master_cpu(cpu);
21345diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21346index 6596433..1ad6eaf 100644
21347--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21348+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21349@@ -1024,6 +1024,22 @@ static struct attribute *default_attrs[] = {
21350 };
21351
21352 #ifdef CONFIG_AMD_NB
21353+static struct attribute *default_attrs_amd_nb[] = {
21354+ &type.attr,
21355+ &level.attr,
21356+ &coherency_line_size.attr,
21357+ &physical_line_partition.attr,
21358+ &ways_of_associativity.attr,
21359+ &number_of_sets.attr,
21360+ &size.attr,
21361+ &shared_cpu_map.attr,
21362+ &shared_cpu_list.attr,
21363+ NULL,
21364+ NULL,
21365+ NULL,
21366+ NULL
21367+};
21368+
21369 static struct attribute **amd_l3_attrs(void)
21370 {
21371 static struct attribute **attrs;
21372@@ -1034,18 +1050,7 @@ static struct attribute **amd_l3_attrs(void)
21373
21374 n = ARRAY_SIZE(default_attrs);
21375
21376- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21377- n += 2;
21378-
21379- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21380- n += 1;
21381-
21382- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21383- if (attrs == NULL)
21384- return attrs = default_attrs;
21385-
21386- for (n = 0; default_attrs[n]; n++)
21387- attrs[n] = default_attrs[n];
21388+ attrs = default_attrs_amd_nb;
21389
21390 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21391 attrs[n++] = &cache_disable_0.attr;
21392@@ -1096,6 +1101,13 @@ static struct kobj_type ktype_cache = {
21393 .default_attrs = default_attrs,
21394 };
21395
21396+#ifdef CONFIG_AMD_NB
21397+static struct kobj_type ktype_cache_amd_nb = {
21398+ .sysfs_ops = &sysfs_ops,
21399+ .default_attrs = default_attrs_amd_nb,
21400+};
21401+#endif
21402+
21403 static struct kobj_type ktype_percpu_entry = {
21404 .sysfs_ops = &sysfs_ops,
21405 };
21406@@ -1161,20 +1173,26 @@ static int cache_add_dev(struct device *dev)
21407 return retval;
21408 }
21409
21410+#ifdef CONFIG_AMD_NB
21411+ amd_l3_attrs();
21412+#endif
21413+
21414 for (i = 0; i < num_cache_leaves; i++) {
21415+ struct kobj_type *ktype;
21416+
21417 this_object = INDEX_KOBJECT_PTR(cpu, i);
21418 this_object->cpu = cpu;
21419 this_object->index = i;
21420
21421 this_leaf = CPUID4_INFO_IDX(cpu, i);
21422
21423- ktype_cache.default_attrs = default_attrs;
21424+ ktype = &ktype_cache;
21425 #ifdef CONFIG_AMD_NB
21426 if (this_leaf->base.nb)
21427- ktype_cache.default_attrs = amd_l3_attrs();
21428+ ktype = &ktype_cache_amd_nb;
21429 #endif
21430 retval = kobject_init_and_add(&(this_object->kobj),
21431- &ktype_cache,
21432+ ktype,
21433 per_cpu(ici_cache_kobject, cpu),
21434 "index%1lu", i);
21435 if (unlikely(retval)) {
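
The intel_cacheinfo change replaces a kzalloc()'d, runtime-assembled attribute array with a static default_attrs_amd_nb[] that reserves trailing NULL slots for the optional AMD L3 entries; hotplug then picks one of two kobj_types instead of mutating ktype_cache.default_attrs in place, which keeps both types constifiable. A sketch of the reserve-slots pattern; all names are illustrative:

#include <stdio.h>

/* Sketch: a fixed array with spare NULL slots that optional features
 * fill in, avoiding a runtime allocation and keeping the array's
 * address stable so it can sit behind a const structure. */
static const char *attrs[] = {
	"type", "level", "size",
	NULL, NULL, NULL,	/* spare slots + terminator */
};

int main(void)
{
	size_t n = 3;			/* number of fixed entries */

	attrs[n++] = "cache_disable_0";	/* optional feature present */
	attrs[n++] = "cache_disable_1";

	for (size_t i = 0; attrs[i]; i++)
		puts(attrs[i]);
	return 0;
}
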
21436diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21437index 3c036cb..3b5677d 100644
21438--- a/arch/x86/kernel/cpu/mcheck/mce.c
21439+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21440@@ -47,6 +47,7 @@
21441 #include <asm/tlbflush.h>
21442 #include <asm/mce.h>
21443 #include <asm/msr.h>
21444+#include <asm/local.h>
21445
21446 #include "mce-internal.h"
21447
21448@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21449 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21450 m->cs, m->ip);
21451
21452- if (m->cs == __KERNEL_CS)
21453+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21454 print_symbol("{%s}", m->ip);
21455 pr_cont("\n");
21456 }
21457@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21458
21459 #define PANIC_TIMEOUT 5 /* 5 seconds */
21460
21461-static atomic_t mce_panicked;
21462+static atomic_unchecked_t mce_panicked;
21463
21464 static int fake_panic;
21465-static atomic_t mce_fake_panicked;
21466+static atomic_unchecked_t mce_fake_panicked;
21467
21468 /* Panic in progress. Enable interrupts and wait for final IPI */
21469 static void wait_for_panic(void)
21470@@ -318,7 +319,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21471 /*
21472 * Make sure only one CPU runs in machine check panic
21473 */
21474- if (atomic_inc_return(&mce_panicked) > 1)
21475+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21476 wait_for_panic();
21477 barrier();
21478
21479@@ -326,7 +327,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21480 console_verbose();
21481 } else {
21482 /* Don't log too much for fake panic */
21483- if (atomic_inc_return(&mce_fake_panicked) > 1)
21484+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21485 return;
21486 }
21487 /* First print corrected ones that are still unlogged */
21488@@ -365,7 +366,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21489 if (!fake_panic) {
21490 if (panic_timeout == 0)
21491 panic_timeout = mca_cfg.panic_timeout;
21492- panic(msg);
21493+ panic("%s", msg);
21494 } else
21495 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21496 }
21497@@ -743,7 +744,7 @@ static int mce_timed_out(u64 *t, const char *msg)
21498 * might have been modified by someone else.
21499 */
21500 rmb();
21501- if (atomic_read(&mce_panicked))
21502+ if (atomic_read_unchecked(&mce_panicked))
21503 wait_for_panic();
21504 if (!mca_cfg.monarch_timeout)
21505 goto out;
21506@@ -1669,7 +1670,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21507 }
21508
21509 /* Call the installed machine check handler for this CPU setup. */
21510-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21511+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21512 unexpected_machine_check;
21513
21514 /*
21515@@ -1692,7 +1693,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21516 return;
21517 }
21518
21519+ pax_open_kernel();
21520 machine_check_vector = do_machine_check;
21521+ pax_close_kernel();
21522
21523 __mcheck_cpu_init_generic();
21524 __mcheck_cpu_init_vendor(c);
21525@@ -1706,7 +1709,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21526 */
21527
21528 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21529-static int mce_chrdev_open_count; /* #times opened */
21530+static local_t mce_chrdev_open_count; /* #times opened */
21531 static int mce_chrdev_open_exclu; /* already open exclusive? */
21532
21533 static int mce_chrdev_open(struct inode *inode, struct file *file)
21534@@ -1714,7 +1717,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21535 spin_lock(&mce_chrdev_state_lock);
21536
21537 if (mce_chrdev_open_exclu ||
21538- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21539+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21540 spin_unlock(&mce_chrdev_state_lock);
21541
21542 return -EBUSY;
21543@@ -1722,7 +1725,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21544
21545 if (file->f_flags & O_EXCL)
21546 mce_chrdev_open_exclu = 1;
21547- mce_chrdev_open_count++;
21548+ local_inc(&mce_chrdev_open_count);
21549
21550 spin_unlock(&mce_chrdev_state_lock);
21551
21552@@ -1733,7 +1736,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21553 {
21554 spin_lock(&mce_chrdev_state_lock);
21555
21556- mce_chrdev_open_count--;
21557+ local_dec(&mce_chrdev_open_count);
21558 mce_chrdev_open_exclu = 0;
21559
21560 spin_unlock(&mce_chrdev_state_lock);
21561@@ -2408,7 +2411,7 @@ static __init void mce_init_banks(void)
21562
21563 for (i = 0; i < mca_cfg.banks; i++) {
21564 struct mce_bank *b = &mce_banks[i];
21565- struct device_attribute *a = &b->attr;
21566+ device_attribute_no_const *a = &b->attr;
21567
21568 sysfs_attr_init(&a->attr);
21569 a->attr.name = b->attrname;
21570@@ -2515,7 +2518,7 @@ struct dentry *mce_get_debugfs_dir(void)
21571 static void mce_reset(void)
21572 {
21573 cpu_missing = 0;
21574- atomic_set(&mce_fake_panicked, 0);
21575+ atomic_set_unchecked(&mce_fake_panicked, 0);
21576 atomic_set(&mce_executing, 0);
21577 atomic_set(&mce_callin, 0);
21578 atomic_set(&global_nwo, 0);
21579diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21580index 737b0ad..09ec66e 100644
21581--- a/arch/x86/kernel/cpu/mcheck/p5.c
21582+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21583@@ -12,6 +12,7 @@
21584 #include <asm/tlbflush.h>
21585 #include <asm/mce.h>
21586 #include <asm/msr.h>
21587+#include <asm/pgtable.h>
21588
21589 /* By default disabled */
21590 int mce_p5_enabled __read_mostly;
21591@@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21592 if (!cpu_has(c, X86_FEATURE_MCE))
21593 return;
21594
21595+ pax_open_kernel();
21596 machine_check_vector = pentium_machine_check;
21597+ pax_close_kernel();
21598 /* Make sure the vector pointer is visible before we enable MCEs: */
21599 wmb();
21600
21601diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21602index 44f1382..315b292 100644
21603--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21604+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21605@@ -11,6 +11,7 @@
21606 #include <asm/tlbflush.h>
21607 #include <asm/mce.h>
21608 #include <asm/msr.h>
21609+#include <asm/pgtable.h>
21610
21611 /* Machine check handler for WinChip C6: */
21612 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21613@@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21614 {
21615 u32 lo, hi;
21616
21617+ pax_open_kernel();
21618 machine_check_vector = winchip_machine_check;
21619+ pax_close_kernel();
21620 /* Make sure the vector pointer is visible before we enable MCEs: */
21621 wmb();
21622
21623diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21624index 36a8361..e7058c2 100644
21625--- a/arch/x86/kernel/cpu/microcode/core.c
21626+++ b/arch/x86/kernel/cpu/microcode/core.c
21627@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21628 return NOTIFY_OK;
21629 }
21630
21631-static struct notifier_block __refdata mc_cpu_notifier = {
21632+static struct notifier_block mc_cpu_notifier = {
21633 .notifier_call = mc_cpu_callback,
21634 };
21635
21636diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21637index 746e7fd..8dc677e 100644
21638--- a/arch/x86/kernel/cpu/microcode/intel.c
21639+++ b/arch/x86/kernel/cpu/microcode/intel.c
21640@@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21641
21642 static int get_ucode_user(void *to, const void *from, size_t n)
21643 {
21644- return copy_from_user(to, from, n);
21645+ return copy_from_user(to, (const void __force_user *)from, n);
21646 }
21647
21648 static enum ucode_state
21649 request_microcode_user(int cpu, const void __user *buf, size_t size)
21650 {
21651- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21652+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21653 }
21654
21655 static void microcode_fini_cpu(int cpu)
21656diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21657index ea5f363..cb0e905 100644
21658--- a/arch/x86/kernel/cpu/mtrr/main.c
21659+++ b/arch/x86/kernel/cpu/mtrr/main.c
21660@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21661 u64 size_or_mask, size_and_mask;
21662 static bool mtrr_aps_delayed_init;
21663
21664-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21665+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21666
21667 const struct mtrr_ops *mtrr_if;
21668
21669diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21670index df5e41f..816c719 100644
21671--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21672+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21673@@ -25,7 +25,7 @@ struct mtrr_ops {
21674 int (*validate_add_page)(unsigned long base, unsigned long size,
21675 unsigned int type);
21676 int (*have_wrcomb)(void);
21677-};
21678+} __do_const;
21679
21680 extern int generic_get_free_region(unsigned long base, unsigned long size,
21681 int replace_reg);
21682diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21683index b71a7f8..534af0e 100644
21684--- a/arch/x86/kernel/cpu/perf_event.c
21685+++ b/arch/x86/kernel/cpu/perf_event.c
21686@@ -1376,7 +1376,7 @@ static void __init pmu_check_apic(void)
21687
21688 }
21689
21690-static struct attribute_group x86_pmu_format_group = {
21691+static attribute_group_no_const x86_pmu_format_group = {
21692 .name = "format",
21693 .attrs = NULL,
21694 };
21695@@ -1475,7 +1475,7 @@ static struct attribute *events_attr[] = {
21696 NULL,
21697 };
21698
21699-static struct attribute_group x86_pmu_events_group = {
21700+static attribute_group_no_const x86_pmu_events_group = {
21701 .name = "events",
21702 .attrs = events_attr,
21703 };
21704@@ -2037,7 +2037,7 @@ static unsigned long get_segment_base(unsigned int segment)
21705 if (idx > GDT_ENTRIES)
21706 return 0;
21707
21708- desc = raw_cpu_ptr(gdt_page.gdt);
21709+ desc = get_cpu_gdt_table(smp_processor_id());
21710 }
21711
21712 return get_desc_base(desc + idx);
21713@@ -2127,7 +2127,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21714 break;
21715
21716 perf_callchain_store(entry, frame.return_address);
21717- fp = frame.next_frame;
21718+ fp = (const void __force_user *)frame.next_frame;
21719 }
21720 }
21721
21722diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21723index 97242a9..cf9c30e 100644
21724--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21725+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21726@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21727 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21728 {
21729 struct attribute **attrs;
21730- struct attribute_group *attr_group;
21731+ attribute_group_no_const *attr_group;
21732 int i = 0, j;
21733
21734 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21735diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21736index 2589906..1ca1000 100644
21737--- a/arch/x86/kernel/cpu/perf_event_intel.c
21738+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21739@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21740 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21741
21742 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21743- u64 capabilities;
21744+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21745
21746- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21747- x86_pmu.intel_cap.capabilities = capabilities;
21748+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21749+ x86_pmu.intel_cap.capabilities = capabilities;
21750 }
21751
21752 intel_ds_init();
21753diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21754index c4bb8b8..9f7384d 100644
21755--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21756+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21757@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21758 NULL,
21759 };
21760
21761-static struct attribute_group rapl_pmu_events_group = {
21762+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21763 .name = "events",
21764 .attrs = NULL, /* patched at runtime */
21765 };
21766diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21767index c635b8b..b78835e 100644
21768--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21769+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21770@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21771 static int __init uncore_type_init(struct intel_uncore_type *type)
21772 {
21773 struct intel_uncore_pmu *pmus;
21774- struct attribute_group *attr_group;
21775+ attribute_group_no_const *attr_group;
21776 struct attribute **attrs;
21777 int i, j;
21778
21779diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21780index 6c8c1e7..515b98a 100644
21781--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21782+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21783@@ -114,7 +114,7 @@ struct intel_uncore_box {
21784 struct uncore_event_desc {
21785 struct kobj_attribute attr;
21786 const char *config;
21787-};
21788+} __do_const;
21789
21790 ssize_t uncore_event_show(struct kobject *kobj,
21791 struct kobj_attribute *attr, char *buf);
21792diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21793index 83741a7..bd3507d 100644
21794--- a/arch/x86/kernel/cpuid.c
21795+++ b/arch/x86/kernel/cpuid.c
21796@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21797 return notifier_from_errno(err);
21798 }
21799
21800-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21801+static struct notifier_block cpuid_class_cpu_notifier =
21802 {
21803 .notifier_call = cpuid_class_cpu_callback,
21804 };
21805diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21806index aceb2f9..c76d3e3 100644
21807--- a/arch/x86/kernel/crash.c
21808+++ b/arch/x86/kernel/crash.c
21809@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21810 #ifdef CONFIG_X86_32
21811 struct pt_regs fixed_regs;
21812
21813- if (!user_mode_vm(regs)) {
21814+ if (!user_mode(regs)) {
21815 crash_fixup_ss_esp(&fixed_regs, regs);
21816 regs = &fixed_regs;
21817 }
21818diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21819index afa64ad..dce67dd 100644
21820--- a/arch/x86/kernel/crash_dump_64.c
21821+++ b/arch/x86/kernel/crash_dump_64.c
21822@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21823 return -ENOMEM;
21824
21825 if (userbuf) {
21826- if (copy_to_user(buf, vaddr + offset, csize)) {
21827+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21828 iounmap(vaddr);
21829 return -EFAULT;
21830 }
21831diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21832index f6dfd93..892ade4 100644
21833--- a/arch/x86/kernel/doublefault.c
21834+++ b/arch/x86/kernel/doublefault.c
21835@@ -12,7 +12,7 @@
21836
21837 #define DOUBLEFAULT_STACKSIZE (1024)
21838 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21839-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21840+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21841
21842 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21843
21844@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21845 unsigned long gdt, tss;
21846
21847 native_store_gdt(&gdt_desc);
21848- gdt = gdt_desc.address;
21849+ gdt = (unsigned long)gdt_desc.address;
21850
21851 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21852
21853@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21854 /* 0x2 bit is always set */
21855 .flags = X86_EFLAGS_SF | 0x2,
21856 .sp = STACK_START,
21857- .es = __USER_DS,
21858+ .es = __KERNEL_DS,
21859 .cs = __KERNEL_CS,
21860 .ss = __KERNEL_DS,
21861- .ds = __USER_DS,
21862+ .ds = __KERNEL_DS,
21863 .fs = __KERNEL_PERCPU,
21864
21865 .__cr3 = __pa_nodebug(swapper_pg_dir),
21866diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21867index cf3df1d..b637d9a 100644
21868--- a/arch/x86/kernel/dumpstack.c
21869+++ b/arch/x86/kernel/dumpstack.c
21870@@ -2,6 +2,9 @@
21871 * Copyright (C) 1991, 1992 Linus Torvalds
21872 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21873 */
21874+#ifdef CONFIG_GRKERNSEC_HIDESYM
21875+#define __INCLUDED_BY_HIDESYM 1
21876+#endif
21877 #include <linux/kallsyms.h>
21878 #include <linux/kprobes.h>
21879 #include <linux/uaccess.h>
21880@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21881
21882 void printk_address(unsigned long address)
21883 {
21884- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21885+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21886 }
21887
21888 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21889 static void
21890 print_ftrace_graph_addr(unsigned long addr, void *data,
21891 const struct stacktrace_ops *ops,
21892- struct thread_info *tinfo, int *graph)
21893+ struct task_struct *task, int *graph)
21894 {
21895- struct task_struct *task;
21896 unsigned long ret_addr;
21897 int index;
21898
21899 if (addr != (unsigned long)return_to_handler)
21900 return;
21901
21902- task = tinfo->task;
21903 index = task->curr_ret_stack;
21904
21905 if (!task->ret_stack || index < *graph)
21906@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21907 static inline void
21908 print_ftrace_graph_addr(unsigned long addr, void *data,
21909 const struct stacktrace_ops *ops,
21910- struct thread_info *tinfo, int *graph)
21911+ struct task_struct *task, int *graph)
21912 { }
21913 #endif
21914
21915@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21916 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21917 */
21918
21919-static inline int valid_stack_ptr(struct thread_info *tinfo,
21920- void *p, unsigned int size, void *end)
21921+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21922 {
21923- void *t = tinfo;
21924 if (end) {
21925 if (p < end && p >= (end-THREAD_SIZE))
21926 return 1;
21927@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21928 }
21929
21930 unsigned long
21931-print_context_stack(struct thread_info *tinfo,
21932+print_context_stack(struct task_struct *task, void *stack_start,
21933 unsigned long *stack, unsigned long bp,
21934 const struct stacktrace_ops *ops, void *data,
21935 unsigned long *end, int *graph)
21936 {
21937 struct stack_frame *frame = (struct stack_frame *)bp;
21938
21939- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21940+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21941 unsigned long addr;
21942
21943 addr = *stack;
21944@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21945 } else {
21946 ops->address(data, addr, 0);
21947 }
21948- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21949+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21950 }
21951 stack++;
21952 }
21953@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21954 EXPORT_SYMBOL_GPL(print_context_stack);
21955
21956 unsigned long
21957-print_context_stack_bp(struct thread_info *tinfo,
21958+print_context_stack_bp(struct task_struct *task, void *stack_start,
21959 unsigned long *stack, unsigned long bp,
21960 const struct stacktrace_ops *ops, void *data,
21961 unsigned long *end, int *graph)
21962@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21963 struct stack_frame *frame = (struct stack_frame *)bp;
21964 unsigned long *ret_addr = &frame->return_address;
21965
21966- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21967+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21968 unsigned long addr = *ret_addr;
21969
21970 if (!__kernel_text_address(addr))
21971@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21972 ops->address(data, addr, 1);
21973 frame = frame->next_frame;
21974 ret_addr = &frame->return_address;
21975- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21976+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21977 }
21978
21979 return (unsigned long)frame;
21980@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21981 static void print_trace_address(void *data, unsigned long addr, int reliable)
21982 {
21983 touch_nmi_watchdog();
21984- printk(data);
21985+ printk("%s", (char *)data);
21986 printk_stack_address(addr, reliable);
21987 }
21988
21989@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
21990 EXPORT_SYMBOL_GPL(oops_begin);
21991 NOKPROBE_SYMBOL(oops_begin);
21992
21993+extern void gr_handle_kernel_exploit(void);
21994+
21995 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21996 {
21997 if (regs && kexec_should_crash(current))
21998@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21999 panic("Fatal exception in interrupt");
22000 if (panic_on_oops)
22001 panic("Fatal exception");
22002- do_exit(signr);
22003+
22004+ gr_handle_kernel_exploit();
22005+
22006+ do_group_exit(signr);
22007 }
22008 NOKPROBE_SYMBOL(oops_end);
22009
22010@@ -278,7 +282,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22011 print_modules();
22012 show_regs(regs);
22013 #ifdef CONFIG_X86_32
22014- if (user_mode_vm(regs)) {
22015+ if (user_mode(regs)) {
22016 sp = regs->sp;
22017 ss = regs->ss & 0xffff;
22018 } else {
22019@@ -307,7 +311,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22020 unsigned long flags = oops_begin();
22021 int sig = SIGSEGV;
22022
22023- if (!user_mode_vm(regs))
22024+ if (!user_mode(regs))
22025 report_bug(regs->ip, regs);
22026
22027 if (__die(str, regs, err))
22028diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22029index 5abd4cd..c65733b 100644
22030--- a/arch/x86/kernel/dumpstack_32.c
22031+++ b/arch/x86/kernel/dumpstack_32.c
22032@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22033 bp = stack_frame(task, regs);
22034
22035 for (;;) {
22036- struct thread_info *context;
22037+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22038 void *end_stack;
22039
22040 end_stack = is_hardirq_stack(stack, cpu);
22041 if (!end_stack)
22042 end_stack = is_softirq_stack(stack, cpu);
22043
22044- context = task_thread_info(task);
22045- bp = ops->walk_stack(context, stack, bp, ops, data,
22046+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22047 end_stack, &graph);
22048
22049 /* Stop if not on irq stack */
22050@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22051 int i;
22052
22053 show_regs_print_info(KERN_EMERG);
22054- __show_regs(regs, !user_mode_vm(regs));
22055+ __show_regs(regs, !user_mode(regs));
22056
22057 /*
22058 * When in-kernel, we also print out the stack and code at the
22059 * time of the fault..
22060 */
22061- if (!user_mode_vm(regs)) {
22062+ if (!user_mode(regs)) {
22063 unsigned int code_prologue = code_bytes * 43 / 64;
22064 unsigned int code_len = code_bytes;
22065 unsigned char c;
22066 u8 *ip;
22067+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22068
22069 pr_emerg("Stack:\n");
22070 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22071
22072 pr_emerg("Code:");
22073
22074- ip = (u8 *)regs->ip - code_prologue;
22075+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22076 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22077 /* try starting at IP */
22078- ip = (u8 *)regs->ip;
22079+ ip = (u8 *)regs->ip + cs_base;
22080 code_len = code_len - code_prologue + 1;
22081 }
22082 for (i = 0; i < code_len; i++, ip++) {
22083@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22084 pr_cont(" Bad EIP value.");
22085 break;
22086 }
22087- if (ip == (u8 *)regs->ip)
22088+ if (ip == (u8 *)regs->ip + cs_base)
22089 pr_cont(" <%02x>", c);
22090 else
22091 pr_cont(" %02x", c);
22092@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22093 {
22094 unsigned short ud2;
22095
22096+ ip = ktla_ktva(ip);
22097 if (ip < PAGE_OFFSET)
22098 return 0;
22099 if (probe_kernel_address((unsigned short *)ip, ud2))
22100@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22101
22102 return ud2 == 0x0b0f;
22103 }
22104+
22105+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22106+void pax_check_alloca(unsigned long size)
22107+{
22108+ unsigned long sp = (unsigned long)&sp, stack_left;
22109+
22110+ /* all kernel stacks are of the same size */
22111+ stack_left = sp & (THREAD_SIZE - 1);
22112+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22113+}
22114+EXPORT_SYMBOL(pax_check_alloca);
22115+#endif
22116diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22117index ff86f19..73eabf4 100644
22118--- a/arch/x86/kernel/dumpstack_64.c
22119+++ b/arch/x86/kernel/dumpstack_64.c
22120@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22121 const struct stacktrace_ops *ops, void *data)
22122 {
22123 const unsigned cpu = get_cpu();
22124- struct thread_info *tinfo;
22125 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22126 unsigned long dummy;
22127 unsigned used = 0;
22128 int graph = 0;
22129 int done = 0;
22130+ void *stack_start;
22131
22132 if (!task)
22133 task = current;
22134@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22135 * current stack address. If the stacks consist of nested
22136 * exceptions
22137 */
22138- tinfo = task_thread_info(task);
22139 while (!done) {
22140 unsigned long *stack_end;
22141 enum stack_type stype;
22142@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22143 if (ops->stack(data, id) < 0)
22144 break;
22145
22146- bp = ops->walk_stack(tinfo, stack, bp, ops,
22147+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22148 data, stack_end, &graph);
22149 ops->stack(data, "<EOE>");
22150 /*
22151@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22152 * second-to-last pointer (index -2 to end) in the
22153 * exception stack:
22154 */
22155+ if ((u16)stack_end[-1] != __KERNEL_DS)
22156+ goto out;
22157 stack = (unsigned long *) stack_end[-2];
22158 done = 0;
22159 break;
22160@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22161
22162 if (ops->stack(data, "IRQ") < 0)
22163 break;
22164- bp = ops->walk_stack(tinfo, stack, bp,
22165+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22166 ops, data, stack_end, &graph);
22167 /*
22168 * We link to the next stack (which would be
22169@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22170 /*
22171 * This handles the process stack:
22172 */
22173- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22174+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22175+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22176+out:
22177 put_cpu();
22178 }
22179 EXPORT_SYMBOL(dump_trace);
22180@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22181 {
22182 unsigned short ud2;
22183
22184- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22185+ if (probe_kernel_address((unsigned short *)ip, ud2))
22186 return 0;
22187
22188 return ud2 == 0x0b0f;
22189 }
22190+
22191+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22192+void pax_check_alloca(unsigned long size)
22193+{
22194+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22195+ unsigned cpu, used;
22196+ char *id;
22197+
22198+ /* check the process stack first */
22199+ stack_start = (unsigned long)task_stack_page(current);
22200+ stack_end = stack_start + THREAD_SIZE;
22201+ if (likely(stack_start <= sp && sp < stack_end)) {
22202+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22203+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22204+ return;
22205+ }
22206+
22207+ cpu = get_cpu();
22208+
22209+ /* check the irq stacks */
22210+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22211+ stack_start = stack_end - IRQ_STACK_SIZE;
22212+ if (stack_start <= sp && sp < stack_end) {
22213+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22214+ put_cpu();
22215+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22216+ return;
22217+ }
22218+
22219+ /* check the exception stacks */
22220+ used = 0;
22221+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22222+ stack_start = stack_end - EXCEPTION_STKSZ;
22223+ if (stack_end && stack_start <= sp && sp < stack_end) {
22224+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22225+ put_cpu();
22226+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22227+ return;
22228+ }
22229+
22230+ put_cpu();
22231+
22232+ /* unknown stack */
22233+ BUG();
22234+}
22235+EXPORT_SYMBOL(pax_check_alloca);
22236+#endif
22237diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22238index 46201de..ebffabf 100644
22239--- a/arch/x86/kernel/e820.c
22240+++ b/arch/x86/kernel/e820.c
22241@@ -794,8 +794,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22242
22243 static void early_panic(char *msg)
22244 {
22245- early_printk(msg);
22246- panic(msg);
22247+ early_printk("%s", msg);
22248+ panic("%s", msg);
22249 }
22250
22251 static int userdef __initdata;
22252diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22253index a62536a..8444df4 100644
22254--- a/arch/x86/kernel/early_printk.c
22255+++ b/arch/x86/kernel/early_printk.c
22256@@ -7,6 +7,7 @@
22257 #include <linux/pci_regs.h>
22258 #include <linux/pci_ids.h>
22259 #include <linux/errno.h>
22260+#include <linux/sched.h>
22261 #include <asm/io.h>
22262 #include <asm/processor.h>
22263 #include <asm/fcntl.h>
22264diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22265index 31e2d5b..b31c76d 100644
22266--- a/arch/x86/kernel/entry_32.S
22267+++ b/arch/x86/kernel/entry_32.S
22268@@ -177,13 +177,154 @@
22269 /*CFI_REL_OFFSET gs, PT_GS*/
22270 .endm
22271 .macro SET_KERNEL_GS reg
22272+
22273+#ifdef CONFIG_CC_STACKPROTECTOR
22274 movl $(__KERNEL_STACK_CANARY), \reg
22275+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22276+ movl $(__USER_DS), \reg
22277+#else
22278+ xorl \reg, \reg
22279+#endif
22280+
22281 movl \reg, %gs
22282 .endm
22283
22284 #endif /* CONFIG_X86_32_LAZY_GS */
22285
22286-.macro SAVE_ALL
22287+.macro pax_enter_kernel
22288+#ifdef CONFIG_PAX_KERNEXEC
22289+ call pax_enter_kernel
22290+#endif
22291+.endm
22292+
22293+.macro pax_exit_kernel
22294+#ifdef CONFIG_PAX_KERNEXEC
22295+ call pax_exit_kernel
22296+#endif
22297+.endm
22298+
22299+#ifdef CONFIG_PAX_KERNEXEC
22300+ENTRY(pax_enter_kernel)
22301+#ifdef CONFIG_PARAVIRT
22302+ pushl %eax
22303+ pushl %ecx
22304+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22305+ mov %eax, %esi
22306+#else
22307+ mov %cr0, %esi
22308+#endif
22309+ bts $16, %esi
22310+ jnc 1f
22311+ mov %cs, %esi
22312+ cmp $__KERNEL_CS, %esi
22313+ jz 3f
22314+ ljmp $__KERNEL_CS, $3f
22315+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22316+2:
22317+#ifdef CONFIG_PARAVIRT
22318+ mov %esi, %eax
22319+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22320+#else
22321+ mov %esi, %cr0
22322+#endif
22323+3:
22324+#ifdef CONFIG_PARAVIRT
22325+ popl %ecx
22326+ popl %eax
22327+#endif
22328+ ret
22329+ENDPROC(pax_enter_kernel)
22330+
22331+ENTRY(pax_exit_kernel)
22332+#ifdef CONFIG_PARAVIRT
22333+ pushl %eax
22334+ pushl %ecx
22335+#endif
22336+ mov %cs, %esi
22337+ cmp $__KERNEXEC_KERNEL_CS, %esi
22338+ jnz 2f
22339+#ifdef CONFIG_PARAVIRT
22340+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22341+ mov %eax, %esi
22342+#else
22343+ mov %cr0, %esi
22344+#endif
22345+ btr $16, %esi
22346+ ljmp $__KERNEL_CS, $1f
22347+1:
22348+#ifdef CONFIG_PARAVIRT
22349+ mov %esi, %eax
22350+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22351+#else
22352+ mov %esi, %cr0
22353+#endif
22354+2:
22355+#ifdef CONFIG_PARAVIRT
22356+ popl %ecx
22357+ popl %eax
22358+#endif
22359+ ret
22360+ENDPROC(pax_exit_kernel)
22361+#endif
22362+
22363+ .macro pax_erase_kstack
22364+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22365+ call pax_erase_kstack
22366+#endif
22367+ .endm
22368+
22369+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22370+/*
22371+ * ebp: thread_info
22372+ */
22373+ENTRY(pax_erase_kstack)
22374+ pushl %edi
22375+ pushl %ecx
22376+ pushl %eax
22377+
22378+ mov TI_lowest_stack(%ebp), %edi
22379+ mov $-0xBEEF, %eax
22380+ std
22381+
22382+1: mov %edi, %ecx
22383+ and $THREAD_SIZE_asm - 1, %ecx
22384+ shr $2, %ecx
22385+ repne scasl
22386+ jecxz 2f
22387+
22388+ cmp $2*16, %ecx
22389+ jc 2f
22390+
22391+ mov $2*16, %ecx
22392+ repe scasl
22393+ jecxz 2f
22394+ jne 1b
22395+
22396+2: cld
22397+ or $2*4, %edi
22398+ mov %esp, %ecx
22399+ sub %edi, %ecx
22400+
22401+ cmp $THREAD_SIZE_asm, %ecx
22402+ jb 3f
22403+ ud2
22404+3:
22405+
22406+ shr $2, %ecx
22407+ rep stosl
22408+
22409+ mov TI_task_thread_sp0(%ebp), %edi
22410+ sub $128, %edi
22411+ mov %edi, TI_lowest_stack(%ebp)
22412+
22413+ popl %eax
22414+ popl %ecx
22415+ popl %edi
22416+ ret
22417+ENDPROC(pax_erase_kstack)
22418+#endif
22419+
22420+.macro __SAVE_ALL _DS
22421 cld
22422 PUSH_GS
22423 pushl_cfi %fs
22424@@ -206,7 +347,7 @@
22425 CFI_REL_OFFSET ecx, 0
22426 pushl_cfi %ebx
22427 CFI_REL_OFFSET ebx, 0
22428- movl $(__USER_DS), %edx
22429+ movl $\_DS, %edx
22430 movl %edx, %ds
22431 movl %edx, %es
22432 movl $(__KERNEL_PERCPU), %edx
22433@@ -214,6 +355,15 @@
22434 SET_KERNEL_GS %edx
22435 .endm
22436
22437+.macro SAVE_ALL
22438+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22439+ __SAVE_ALL __KERNEL_DS
22440+ pax_enter_kernel
22441+#else
22442+ __SAVE_ALL __USER_DS
22443+#endif
22444+.endm
22445+
22446 .macro RESTORE_INT_REGS
22447 popl_cfi %ebx
22448 CFI_RESTORE ebx
22449@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22450 popfl_cfi
22451 jmp syscall_exit
22452 CFI_ENDPROC
22453-END(ret_from_fork)
22454+ENDPROC(ret_from_fork)
22455
22456 ENTRY(ret_from_kernel_thread)
22457 CFI_STARTPROC
22458@@ -340,7 +490,15 @@ ret_from_intr:
22459 andl $SEGMENT_RPL_MASK, %eax
22460 #endif
22461 cmpl $USER_RPL, %eax
22462+
22463+#ifdef CONFIG_PAX_KERNEXEC
22464+ jae resume_userspace
22465+
22466+ pax_exit_kernel
22467+ jmp resume_kernel
22468+#else
22469 jb resume_kernel # not returning to v8086 or userspace
22470+#endif
22471
22472 ENTRY(resume_userspace)
22473 LOCKDEP_SYS_EXIT
22474@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22475 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22476 # int/exception return?
22477 jne work_pending
22478- jmp restore_all
22479-END(ret_from_exception)
22480+ jmp restore_all_pax
22481+ENDPROC(ret_from_exception)
22482
22483 #ifdef CONFIG_PREEMPT
22484 ENTRY(resume_kernel)
22485@@ -365,7 +523,7 @@ need_resched:
22486 jz restore_all
22487 call preempt_schedule_irq
22488 jmp need_resched
22489-END(resume_kernel)
22490+ENDPROC(resume_kernel)
22491 #endif
22492 CFI_ENDPROC
22493
22494@@ -395,30 +553,45 @@ sysenter_past_esp:
22495 /*CFI_REL_OFFSET cs, 0*/
22496 /*
22497 * Push current_thread_info()->sysenter_return to the stack.
22498- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22499- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22500 */
22501- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22502+ pushl_cfi $0
22503 CFI_REL_OFFSET eip, 0
22504
22505 pushl_cfi %eax
22506 SAVE_ALL
22507+ GET_THREAD_INFO(%ebp)
22508+ movl TI_sysenter_return(%ebp),%ebp
22509+ movl %ebp,PT_EIP(%esp)
22510 ENABLE_INTERRUPTS(CLBR_NONE)
22511
22512 /*
22513 * Load the potential sixth argument from user stack.
22514 * Careful about security.
22515 */
22516+ movl PT_OLDESP(%esp),%ebp
22517+
22518+#ifdef CONFIG_PAX_MEMORY_UDEREF
22519+ mov PT_OLDSS(%esp),%ds
22520+1: movl %ds:(%ebp),%ebp
22521+ push %ss
22522+ pop %ds
22523+#else
22524 cmpl $__PAGE_OFFSET-3,%ebp
22525 jae syscall_fault
22526 ASM_STAC
22527 1: movl (%ebp),%ebp
22528 ASM_CLAC
22529+#endif
22530+
22531 movl %ebp,PT_EBP(%esp)
22532 _ASM_EXTABLE(1b,syscall_fault)
22533
22534 GET_THREAD_INFO(%ebp)
22535
22536+#ifdef CONFIG_PAX_RANDKSTACK
22537+ pax_erase_kstack
22538+#endif
22539+
22540 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22541 jnz sysenter_audit
22542 sysenter_do_call:
22543@@ -434,12 +607,24 @@ sysenter_after_call:
22544 testl $_TIF_ALLWORK_MASK, %ecx
22545 jne sysexit_audit
22546 sysenter_exit:
22547+
22548+#ifdef CONFIG_PAX_RANDKSTACK
22549+ pushl_cfi %eax
22550+ movl %esp, %eax
22551+ call pax_randomize_kstack
22552+ popl_cfi %eax
22553+#endif
22554+
22555+ pax_erase_kstack
22556+
22557 /* if something modifies registers it must also disable sysexit */
22558 movl PT_EIP(%esp), %edx
22559 movl PT_OLDESP(%esp), %ecx
22560 xorl %ebp,%ebp
22561 TRACE_IRQS_ON
22562 1: mov PT_FS(%esp), %fs
22563+2: mov PT_DS(%esp), %ds
22564+3: mov PT_ES(%esp), %es
22565 PTGS_TO_GS
22566 ENABLE_INTERRUPTS_SYSEXIT
22567
22568@@ -453,6 +638,9 @@ sysenter_audit:
22569 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22570 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22571 call __audit_syscall_entry
22572+
22573+ pax_erase_kstack
22574+
22575 popl_cfi %ecx /* get that remapped edx off the stack */
22576 popl_cfi %ecx /* get that remapped esi off the stack */
22577 movl PT_EAX(%esp),%eax /* reload syscall number */
22578@@ -479,10 +667,16 @@ sysexit_audit:
22579
22580 CFI_ENDPROC
22581 .pushsection .fixup,"ax"
22582-2: movl $0,PT_FS(%esp)
22583+4: movl $0,PT_FS(%esp)
22584+ jmp 1b
22585+5: movl $0,PT_DS(%esp)
22586+ jmp 1b
22587+6: movl $0,PT_ES(%esp)
22588 jmp 1b
22589 .popsection
22590- _ASM_EXTABLE(1b,2b)
22591+ _ASM_EXTABLE(1b,4b)
22592+ _ASM_EXTABLE(2b,5b)
22593+ _ASM_EXTABLE(3b,6b)
22594 PTGS_TO_GS_EX
22595 ENDPROC(ia32_sysenter_target)
22596
22597@@ -493,6 +687,11 @@ ENTRY(system_call)
22598 pushl_cfi %eax # save orig_eax
22599 SAVE_ALL
22600 GET_THREAD_INFO(%ebp)
22601+
22602+#ifdef CONFIG_PAX_RANDKSTACK
22603+ pax_erase_kstack
22604+#endif
22605+
22606 # system call tracing in operation / emulation
22607 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22608 jnz syscall_trace_entry
22609@@ -512,6 +711,15 @@ syscall_exit:
22610 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22611 jne syscall_exit_work
22612
22613+restore_all_pax:
22614+
22615+#ifdef CONFIG_PAX_RANDKSTACK
22616+ movl %esp, %eax
22617+ call pax_randomize_kstack
22618+#endif
22619+
22620+ pax_erase_kstack
22621+
22622 restore_all:
22623 TRACE_IRQS_IRET
22624 restore_all_notrace:
22625@@ -566,14 +774,34 @@ ldt_ss:
22626 * compensating for the offset by changing to the ESPFIX segment with
22627 * a base address that matches for the difference.
22628 */
22629-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22630+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22631 mov %esp, %edx /* load kernel esp */
22632 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22633 mov %dx, %ax /* eax: new kernel esp */
22634 sub %eax, %edx /* offset (low word is 0) */
22635+#ifdef CONFIG_SMP
22636+ movl PER_CPU_VAR(cpu_number), %ebx
22637+ shll $PAGE_SHIFT_asm, %ebx
22638+ addl $cpu_gdt_table, %ebx
22639+#else
22640+ movl $cpu_gdt_table, %ebx
22641+#endif
22642 shr $16, %edx
22643- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22644- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22645+
22646+#ifdef CONFIG_PAX_KERNEXEC
22647+ mov %cr0, %esi
22648+ btr $16, %esi
22649+ mov %esi, %cr0
22650+#endif
22651+
22652+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22653+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22654+
22655+#ifdef CONFIG_PAX_KERNEXEC
22656+ bts $16, %esi
22657+ mov %esi, %cr0
22658+#endif
22659+
22660 pushl_cfi $__ESPFIX_SS
22661 pushl_cfi %eax /* new kernel esp */
22662 /* Disable interrupts, but do not irqtrace this section: we
22663@@ -603,20 +831,18 @@ work_resched:
22664 movl TI_flags(%ebp), %ecx
22665 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22666 # than syscall tracing?
22667- jz restore_all
22668+ jz restore_all_pax
22669 testb $_TIF_NEED_RESCHED, %cl
22670 jnz work_resched
22671
22672 work_notifysig: # deal with pending signals and
22673 # notify-resume requests
22674+ movl %esp, %eax
22675 #ifdef CONFIG_VM86
22676 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22677- movl %esp, %eax
22678 jne work_notifysig_v86 # returning to kernel-space or
22679 # vm86-space
22680 1:
22681-#else
22682- movl %esp, %eax
22683 #endif
22684 TRACE_IRQS_ON
22685 ENABLE_INTERRUPTS(CLBR_NONE)
22686@@ -637,7 +863,7 @@ work_notifysig_v86:
22687 movl %eax, %esp
22688 jmp 1b
22689 #endif
22690-END(work_pending)
22691+ENDPROC(work_pending)
22692
22693 # perform syscall exit tracing
22694 ALIGN
22695@@ -645,11 +871,14 @@ syscall_trace_entry:
22696 movl $-ENOSYS,PT_EAX(%esp)
22697 movl %esp, %eax
22698 call syscall_trace_enter
22699+
22700+ pax_erase_kstack
22701+
22702 /* What it returned is what we'll actually use. */
22703 cmpl $(NR_syscalls), %eax
22704 jnae syscall_call
22705 jmp syscall_exit
22706-END(syscall_trace_entry)
22707+ENDPROC(syscall_trace_entry)
22708
22709 # perform syscall exit tracing
22710 ALIGN
22711@@ -662,26 +891,30 @@ syscall_exit_work:
22712 movl %esp, %eax
22713 call syscall_trace_leave
22714 jmp resume_userspace
22715-END(syscall_exit_work)
22716+ENDPROC(syscall_exit_work)
22717 CFI_ENDPROC
22718
22719 RING0_INT_FRAME # can't unwind into user space anyway
22720 syscall_fault:
22721+#ifdef CONFIG_PAX_MEMORY_UDEREF
22722+ push %ss
22723+ pop %ds
22724+#endif
22725 ASM_CLAC
22726 GET_THREAD_INFO(%ebp)
22727 movl $-EFAULT,PT_EAX(%esp)
22728 jmp resume_userspace
22729-END(syscall_fault)
22730+ENDPROC(syscall_fault)
22731
22732 syscall_badsys:
22733 movl $-ENOSYS,%eax
22734 jmp syscall_after_call
22735-END(syscall_badsys)
22736+ENDPROC(syscall_badsys)
22737
22738 sysenter_badsys:
22739 movl $-ENOSYS,%eax
22740 jmp sysenter_after_call
22741-END(sysenter_badsys)
22742+ENDPROC(sysenter_badsys)
22743 CFI_ENDPROC
22744
22745 .macro FIXUP_ESPFIX_STACK
22746@@ -694,8 +927,15 @@ END(sysenter_badsys)
22747 */
22748 #ifdef CONFIG_X86_ESPFIX32
22749 /* fixup the stack */
22750- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22751- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22752+#ifdef CONFIG_SMP
22753+ movl PER_CPU_VAR(cpu_number), %ebx
22754+ shll $PAGE_SHIFT_asm, %ebx
22755+ addl $cpu_gdt_table, %ebx
22756+#else
22757+ movl $cpu_gdt_table, %ebx
22758+#endif
22759+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22760+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22761 shl $16, %eax
22762 addl %esp, %eax /* the adjusted stack pointer */
22763 pushl_cfi $__KERNEL_DS
22764@@ -751,7 +991,7 @@ vector=vector+1
22765 .endr
22766 2: jmp common_interrupt
22767 .endr
22768-END(irq_entries_start)
22769+ENDPROC(irq_entries_start)
22770
22771 .previous
22772 END(interrupt)
22773@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22774 pushl_cfi $do_coprocessor_error
22775 jmp error_code
22776 CFI_ENDPROC
22777-END(coprocessor_error)
22778+ENDPROC(coprocessor_error)
22779
22780 ENTRY(simd_coprocessor_error)
22781 RING0_INT_FRAME
22782@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22783 .section .altinstructions,"a"
22784 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22785 .previous
22786-.section .altinstr_replacement,"ax"
22787+.section .altinstr_replacement,"a"
22788 663: pushl $do_simd_coprocessor_error
22789 664:
22790 .previous
22791@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22792 #endif
22793 jmp error_code
22794 CFI_ENDPROC
22795-END(simd_coprocessor_error)
22796+ENDPROC(simd_coprocessor_error)
22797
22798 ENTRY(device_not_available)
22799 RING0_INT_FRAME
22800@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22801 pushl_cfi $do_device_not_available
22802 jmp error_code
22803 CFI_ENDPROC
22804-END(device_not_available)
22805+ENDPROC(device_not_available)
22806
22807 #ifdef CONFIG_PARAVIRT
22808 ENTRY(native_iret)
22809 iret
22810 _ASM_EXTABLE(native_iret, iret_exc)
22811-END(native_iret)
22812+ENDPROC(native_iret)
22813
22814 ENTRY(native_irq_enable_sysexit)
22815 sti
22816 sysexit
22817-END(native_irq_enable_sysexit)
22818+ENDPROC(native_irq_enable_sysexit)
22819 #endif
22820
22821 ENTRY(overflow)
22822@@ -860,7 +1100,7 @@ ENTRY(overflow)
22823 pushl_cfi $do_overflow
22824 jmp error_code
22825 CFI_ENDPROC
22826-END(overflow)
22827+ENDPROC(overflow)
22828
22829 ENTRY(bounds)
22830 RING0_INT_FRAME
22831@@ -869,7 +1109,7 @@ ENTRY(bounds)
22832 pushl_cfi $do_bounds
22833 jmp error_code
22834 CFI_ENDPROC
22835-END(bounds)
22836+ENDPROC(bounds)
22837
22838 ENTRY(invalid_op)
22839 RING0_INT_FRAME
22840@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22841 pushl_cfi $do_invalid_op
22842 jmp error_code
22843 CFI_ENDPROC
22844-END(invalid_op)
22845+ENDPROC(invalid_op)
22846
22847 ENTRY(coprocessor_segment_overrun)
22848 RING0_INT_FRAME
22849@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22850 pushl_cfi $do_coprocessor_segment_overrun
22851 jmp error_code
22852 CFI_ENDPROC
22853-END(coprocessor_segment_overrun)
22854+ENDPROC(coprocessor_segment_overrun)
22855
22856 ENTRY(invalid_TSS)
22857 RING0_EC_FRAME
22858@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22859 pushl_cfi $do_invalid_TSS
22860 jmp error_code
22861 CFI_ENDPROC
22862-END(invalid_TSS)
22863+ENDPROC(invalid_TSS)
22864
22865 ENTRY(segment_not_present)
22866 RING0_EC_FRAME
22867@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22868 pushl_cfi $do_segment_not_present
22869 jmp error_code
22870 CFI_ENDPROC
22871-END(segment_not_present)
22872+ENDPROC(segment_not_present)
22873
22874 ENTRY(stack_segment)
22875 RING0_EC_FRAME
22876@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22877 pushl_cfi $do_stack_segment
22878 jmp error_code
22879 CFI_ENDPROC
22880-END(stack_segment)
22881+ENDPROC(stack_segment)
22882
22883 ENTRY(alignment_check)
22884 RING0_EC_FRAME
22885@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22886 pushl_cfi $do_alignment_check
22887 jmp error_code
22888 CFI_ENDPROC
22889-END(alignment_check)
22890+ENDPROC(alignment_check)
22891
22892 ENTRY(divide_error)
22893 RING0_INT_FRAME
22894@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22895 pushl_cfi $do_divide_error
22896 jmp error_code
22897 CFI_ENDPROC
22898-END(divide_error)
22899+ENDPROC(divide_error)
22900
22901 #ifdef CONFIG_X86_MCE
22902 ENTRY(machine_check)
22903@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22904 pushl_cfi machine_check_vector
22905 jmp error_code
22906 CFI_ENDPROC
22907-END(machine_check)
22908+ENDPROC(machine_check)
22909 #endif
22910
22911 ENTRY(spurious_interrupt_bug)
22912@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22913 pushl_cfi $do_spurious_interrupt_bug
22914 jmp error_code
22915 CFI_ENDPROC
22916-END(spurious_interrupt_bug)
22917+ENDPROC(spurious_interrupt_bug)
22918
22919 #ifdef CONFIG_XEN
22920 /* Xen doesn't set %esp to be precisely what the normal sysenter
22921@@ -1057,7 +1297,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22922
22923 ENTRY(mcount)
22924 ret
22925-END(mcount)
22926+ENDPROC(mcount)
22927
22928 ENTRY(ftrace_caller)
22929 pushl %eax
22930@@ -1087,7 +1327,7 @@ ftrace_graph_call:
22931 .globl ftrace_stub
22932 ftrace_stub:
22933 ret
22934-END(ftrace_caller)
22935+ENDPROC(ftrace_caller)
22936
22937 ENTRY(ftrace_regs_caller)
22938 pushf /* push flags before compare (in cs location) */
22939@@ -1185,7 +1425,7 @@ trace:
22940 popl %ecx
22941 popl %eax
22942 jmp ftrace_stub
22943-END(mcount)
22944+ENDPROC(mcount)
22945 #endif /* CONFIG_DYNAMIC_FTRACE */
22946 #endif /* CONFIG_FUNCTION_TRACER */
22947
22948@@ -1203,7 +1443,7 @@ ENTRY(ftrace_graph_caller)
22949 popl %ecx
22950 popl %eax
22951 ret
22952-END(ftrace_graph_caller)
22953+ENDPROC(ftrace_graph_caller)
22954
22955 .globl return_to_handler
22956 return_to_handler:
22957@@ -1264,15 +1504,18 @@ error_code:
22958 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22959 REG_TO_PTGS %ecx
22960 SET_KERNEL_GS %ecx
22961- movl $(__USER_DS), %ecx
22962+ movl $(__KERNEL_DS), %ecx
22963 movl %ecx, %ds
22964 movl %ecx, %es
22965+
22966+ pax_enter_kernel
22967+
22968 TRACE_IRQS_OFF
22969 movl %esp,%eax # pt_regs pointer
22970 call *%edi
22971 jmp ret_from_exception
22972 CFI_ENDPROC
22973-END(page_fault)
22974+ENDPROC(page_fault)
22975
22976 /*
22977 * Debug traps and NMI can happen at the one SYSENTER instruction
22978@@ -1315,7 +1558,7 @@ debug_stack_correct:
22979 call do_debug
22980 jmp ret_from_exception
22981 CFI_ENDPROC
22982-END(debug)
22983+ENDPROC(debug)
22984
22985 /*
22986 * NMI is doubly nasty. It can happen _while_ we're handling
22987@@ -1355,6 +1598,9 @@ nmi_stack_correct:
22988 xorl %edx,%edx # zero error code
22989 movl %esp,%eax # pt_regs pointer
22990 call do_nmi
22991+
22992+ pax_exit_kernel
22993+
22994 jmp restore_all_notrace
22995 CFI_ENDPROC
22996
22997@@ -1392,13 +1638,16 @@ nmi_espfix_stack:
22998 FIXUP_ESPFIX_STACK # %eax == %esp
22999 xorl %edx,%edx # zero error code
23000 call do_nmi
23001+
23002+ pax_exit_kernel
23003+
23004 RESTORE_REGS
23005 lss 12+4(%esp), %esp # back to espfix stack
23006 CFI_ADJUST_CFA_OFFSET -24
23007 jmp irq_return
23008 #endif
23009 CFI_ENDPROC
23010-END(nmi)
23011+ENDPROC(nmi)
23012
23013 ENTRY(int3)
23014 RING0_INT_FRAME
23015@@ -1411,14 +1660,14 @@ ENTRY(int3)
23016 call do_int3
23017 jmp ret_from_exception
23018 CFI_ENDPROC
23019-END(int3)
23020+ENDPROC(int3)
23021
23022 ENTRY(general_protection)
23023 RING0_EC_FRAME
23024 pushl_cfi $do_general_protection
23025 jmp error_code
23026 CFI_ENDPROC
23027-END(general_protection)
23028+ENDPROC(general_protection)
23029
23030 #ifdef CONFIG_KVM_GUEST
23031 ENTRY(async_page_fault)
23032@@ -1427,6 +1676,6 @@ ENTRY(async_page_fault)
23033 pushl_cfi $do_async_page_fault
23034 jmp error_code
23035 CFI_ENDPROC
23036-END(async_page_fault)
23037+ENDPROC(async_page_fault)
23038 #endif
23039
23040diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23041index f0095a7..ec77893 100644
23042--- a/arch/x86/kernel/entry_64.S
23043+++ b/arch/x86/kernel/entry_64.S
23044@@ -59,6 +59,8 @@
23045 #include <asm/smap.h>
23046 #include <asm/pgtable_types.h>
23047 #include <linux/err.h>
23048+#include <asm/pgtable.h>
23049+#include <asm/alternative-asm.h>
23050
23051 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23052 #include <linux/elf-em.h>
23053@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23054 ENDPROC(native_usergs_sysret64)
23055 #endif /* CONFIG_PARAVIRT */
23056
23057+ .macro ljmpq sel, off
23058+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23059+ .byte 0x48; ljmp *1234f(%rip)
23060+ .pushsection .rodata
23061+ .align 16
23062+ 1234: .quad \off; .word \sel
23063+ .popsection
23064+#else
23065+ pushq $\sel
23066+ pushq $\off
23067+ lretq
23068+#endif
23069+ .endm
23070+
23071+ .macro pax_enter_kernel
23072+ pax_set_fptr_mask
23073+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23074+ call pax_enter_kernel
23075+#endif
23076+ .endm
23077+
23078+ .macro pax_exit_kernel
23079+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23080+ call pax_exit_kernel
23081+#endif
23082+
23083+ .endm
23084+
23085+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23086+ENTRY(pax_enter_kernel)
23087+ pushq %rdi
23088+
23089+#ifdef CONFIG_PARAVIRT
23090+ PV_SAVE_REGS(CLBR_RDI)
23091+#endif
23092+
23093+#ifdef CONFIG_PAX_KERNEXEC
23094+ GET_CR0_INTO_RDI
23095+ bts $16,%rdi
23096+ jnc 3f
23097+ mov %cs,%edi
23098+ cmp $__KERNEL_CS,%edi
23099+ jnz 2f
23100+1:
23101+#endif
23102+
23103+#ifdef CONFIG_PAX_MEMORY_UDEREF
23104+ 661: jmp 111f
23105+ .pushsection .altinstr_replacement, "a"
23106+ 662: ASM_NOP2
23107+ .popsection
23108+ .pushsection .altinstructions, "a"
23109+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23110+ .popsection
23111+ GET_CR3_INTO_RDI
23112+ cmp $0,%dil
23113+ jnz 112f
23114+ mov $__KERNEL_DS,%edi
23115+ mov %edi,%ss
23116+ jmp 111f
23117+112: cmp $1,%dil
23118+ jz 113f
23119+ ud2
23120+113: sub $4097,%rdi
23121+ bts $63,%rdi
23122+ SET_RDI_INTO_CR3
23123+ mov $__UDEREF_KERNEL_DS,%edi
23124+ mov %edi,%ss
23125+111:
23126+#endif
23127+
23128+#ifdef CONFIG_PARAVIRT
23129+ PV_RESTORE_REGS(CLBR_RDI)
23130+#endif
23131+
23132+ popq %rdi
23133+ pax_force_retaddr
23134+ retq
23135+
23136+#ifdef CONFIG_PAX_KERNEXEC
23137+2: ljmpq __KERNEL_CS,1b
23138+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23139+4: SET_RDI_INTO_CR0
23140+ jmp 1b
23141+#endif
23142+ENDPROC(pax_enter_kernel)
23143+
23144+ENTRY(pax_exit_kernel)
23145+ pushq %rdi
23146+
23147+#ifdef CONFIG_PARAVIRT
23148+ PV_SAVE_REGS(CLBR_RDI)
23149+#endif
23150+
23151+#ifdef CONFIG_PAX_KERNEXEC
23152+ mov %cs,%rdi
23153+ cmp $__KERNEXEC_KERNEL_CS,%edi
23154+ jz 2f
23155+ GET_CR0_INTO_RDI
23156+ bts $16,%rdi
23157+ jnc 4f
23158+1:
23159+#endif
23160+
23161+#ifdef CONFIG_PAX_MEMORY_UDEREF
23162+ 661: jmp 111f
23163+ .pushsection .altinstr_replacement, "a"
23164+ 662: ASM_NOP2
23165+ .popsection
23166+ .pushsection .altinstructions, "a"
23167+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23168+ .popsection
23169+ mov %ss,%edi
23170+ cmp $__UDEREF_KERNEL_DS,%edi
23171+ jnz 111f
23172+ GET_CR3_INTO_RDI
23173+ cmp $0,%dil
23174+ jz 112f
23175+ ud2
23176+112: add $4097,%rdi
23177+ bts $63,%rdi
23178+ SET_RDI_INTO_CR3
23179+ mov $__KERNEL_DS,%edi
23180+ mov %edi,%ss
23181+111:
23182+#endif
23183+
23184+#ifdef CONFIG_PARAVIRT
23185+ PV_RESTORE_REGS(CLBR_RDI);
23186+#endif
23187+
23188+ popq %rdi
23189+ pax_force_retaddr
23190+ retq
23191+
23192+#ifdef CONFIG_PAX_KERNEXEC
23193+2: GET_CR0_INTO_RDI
23194+ btr $16,%rdi
23195+ jnc 4f
23196+ ljmpq __KERNEL_CS,3f
23197+3: SET_RDI_INTO_CR0
23198+ jmp 1b
23199+4: ud2
23200+ jmp 4b
23201+#endif
23202+ENDPROC(pax_exit_kernel)
23203+#endif
23204+
23205+ .macro pax_enter_kernel_user
23206+ pax_set_fptr_mask
23207+#ifdef CONFIG_PAX_MEMORY_UDEREF
23208+ call pax_enter_kernel_user
23209+#endif
23210+ .endm
23211+
23212+ .macro pax_exit_kernel_user
23213+#ifdef CONFIG_PAX_MEMORY_UDEREF
23214+ call pax_exit_kernel_user
23215+#endif
23216+#ifdef CONFIG_PAX_RANDKSTACK
23217+ pushq %rax
23218+ pushq %r11
23219+ call pax_randomize_kstack
23220+ popq %r11
23221+ popq %rax
23222+#endif
23223+ .endm
23224+
23225+#ifdef CONFIG_PAX_MEMORY_UDEREF
23226+ENTRY(pax_enter_kernel_user)
23227+ pushq %rdi
23228+ pushq %rbx
23229+
23230+#ifdef CONFIG_PARAVIRT
23231+ PV_SAVE_REGS(CLBR_RDI)
23232+#endif
23233+
23234+ 661: jmp 111f
23235+ .pushsection .altinstr_replacement, "a"
23236+ 662: ASM_NOP2
23237+ .popsection
23238+ .pushsection .altinstructions, "a"
23239+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23240+ .popsection
23241+ GET_CR3_INTO_RDI
23242+ cmp $1,%dil
23243+ jnz 4f
23244+ sub $4097,%rdi
23245+ bts $63,%rdi
23246+ SET_RDI_INTO_CR3
23247+ jmp 3f
23248+111:
23249+
23250+ GET_CR3_INTO_RDI
23251+ mov %rdi,%rbx
23252+ add $__START_KERNEL_map,%rbx
23253+ sub phys_base(%rip),%rbx
23254+
23255+#ifdef CONFIG_PARAVIRT
23256+ cmpl $0, pv_info+PARAVIRT_enabled
23257+ jz 1f
23258+ pushq %rdi
23259+ i = 0
23260+ .rept USER_PGD_PTRS
23261+ mov i*8(%rbx),%rsi
23262+ mov $0,%sil
23263+ lea i*8(%rbx),%rdi
23264+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23265+ i = i + 1
23266+ .endr
23267+ popq %rdi
23268+ jmp 2f
23269+1:
23270+#endif
23271+
23272+ i = 0
23273+ .rept USER_PGD_PTRS
23274+ movb $0,i*8(%rbx)
23275+ i = i + 1
23276+ .endr
23277+
23278+2: SET_RDI_INTO_CR3
23279+
23280+#ifdef CONFIG_PAX_KERNEXEC
23281+ GET_CR0_INTO_RDI
23282+ bts $16,%rdi
23283+ SET_RDI_INTO_CR0
23284+#endif
23285+
23286+3:
23287+
23288+#ifdef CONFIG_PARAVIRT
23289+ PV_RESTORE_REGS(CLBR_RDI)
23290+#endif
23291+
23292+ popq %rbx
23293+ popq %rdi
23294+ pax_force_retaddr
23295+ retq
23296+4: ud2
23297+ENDPROC(pax_enter_kernel_user)
23298+
23299+ENTRY(pax_exit_kernel_user)
23300+ pushq %rdi
23301+ pushq %rbx
23302+
23303+#ifdef CONFIG_PARAVIRT
23304+ PV_SAVE_REGS(CLBR_RDI)
23305+#endif
23306+
23307+ GET_CR3_INTO_RDI
23308+ 661: jmp 1f
23309+ .pushsection .altinstr_replacement, "a"
23310+ 662: ASM_NOP2
23311+ .popsection
23312+ .pushsection .altinstructions, "a"
23313+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23314+ .popsection
23315+ cmp $0,%dil
23316+ jnz 3f
23317+ add $4097,%rdi
23318+ bts $63,%rdi
23319+ SET_RDI_INTO_CR3
23320+ jmp 2f
23321+1:
23322+
23323+ mov %rdi,%rbx
23324+
23325+#ifdef CONFIG_PAX_KERNEXEC
23326+ GET_CR0_INTO_RDI
23327+ btr $16,%rdi
23328+ jnc 3f
23329+ SET_RDI_INTO_CR0
23330+#endif
23331+
23332+ add $__START_KERNEL_map,%rbx
23333+ sub phys_base(%rip),%rbx
23334+
23335+#ifdef CONFIG_PARAVIRT
23336+ cmpl $0, pv_info+PARAVIRT_enabled
23337+ jz 1f
23338+ i = 0
23339+ .rept USER_PGD_PTRS
23340+ mov i*8(%rbx),%rsi
23341+ mov $0x67,%sil
23342+ lea i*8(%rbx),%rdi
23343+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23344+ i = i + 1
23345+ .endr
23346+ jmp 2f
23347+1:
23348+#endif
23349+
23350+ i = 0
23351+ .rept USER_PGD_PTRS
23352+ movb $0x67,i*8(%rbx)
23353+ i = i + 1
23354+ .endr
23355+2:
23356+
23357+#ifdef CONFIG_PARAVIRT
23358+ PV_RESTORE_REGS(CLBR_RDI)
23359+#endif
23360+
23361+ popq %rbx
23362+ popq %rdi
23363+ pax_force_retaddr
23364+ retq
23365+3: ud2
23366+ENDPROC(pax_exit_kernel_user)
23367+#endif
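The pair of routines above is the heart of UDEREF on amd64: pax_enter_kernel_user blanks the first USER_PGD_PTRS entries of the per-CPU user PGD so userland is unmapped while the kernel runs, and pax_exit_kernel_user puts them back by writing 0x67 (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) into the low byte of each entry. On PCID-capable CPUs the alternatives-patched fast path skips the loops entirely and just switches CR3 between the paired kernel/user PGDs (the +/-4097 and bit-63 no-flush arithmetic). A minimal C sketch of the slow path, assuming USER_PGD_PTRS from this patch; the function names below are illustrative, not PaX symbols:

    /* Illustrative C equivalent of the pax_enter/exit_kernel_user loops.
     * "pgd" is the kernel-virtual view of the user PGD that the assembly
     * computes from CR3 via __START_KERNEL_map and phys_base. */
    static void blank_user_pgd(unsigned char *pgd)      /* kernel entry */
    {
            unsigned int i;

            for (i = 0; i < USER_PGD_PTRS; i++)
                    pgd[i * 8] = 0;     /* clears _PAGE_PRESENT and friends */
    }

    static void restore_user_pgd(unsigned char *pgd)    /* kernel exit */
    {
            unsigned int i;

            /* 0x67 = _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY */
            for (i = 0; i < USER_PGD_PTRS; i++)
                    pgd[i * 8] = 0x67;
    }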
23368+
23369+ .macro pax_enter_kernel_nmi
23370+ pax_set_fptr_mask
23371+
23372+#ifdef CONFIG_PAX_KERNEXEC
23373+ GET_CR0_INTO_RDI
23374+ bts $16,%rdi
23375+ jc 110f
23376+ SET_RDI_INTO_CR0
23377+ or $2,%ebx
23378+110:
23379+#endif
23380+
23381+#ifdef CONFIG_PAX_MEMORY_UDEREF
23382+ 661: jmp 111f
23383+ .pushsection .altinstr_replacement, "a"
23384+ 662: ASM_NOP2
23385+ .popsection
23386+ .pushsection .altinstructions, "a"
23387+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23388+ .popsection
23389+ GET_CR3_INTO_RDI
23390+ cmp $0,%dil
23391+ jz 111f
23392+ sub $4097,%rdi
23393+ or $4,%ebx
23394+ bts $63,%rdi
23395+ SET_RDI_INTO_CR3
23396+ mov $__UDEREF_KERNEL_DS,%edi
23397+ mov %edi,%ss
23398+111:
23399+#endif
23400+ .endm
23401+
23402+ .macro pax_exit_kernel_nmi
23403+#ifdef CONFIG_PAX_KERNEXEC
23404+ btr $1,%ebx
23405+ jnc 110f
23406+ GET_CR0_INTO_RDI
23407+ btr $16,%rdi
23408+ SET_RDI_INTO_CR0
23409+110:
23410+#endif
23411+
23412+#ifdef CONFIG_PAX_MEMORY_UDEREF
23413+ btr $2,%ebx
23414+ jnc 111f
23415+ GET_CR3_INTO_RDI
23416+ add $4097,%rdi
23417+ bts $63,%rdi
23418+ SET_RDI_INTO_CR3
23419+ mov $__KERNEL_DS,%edi
23420+ mov %edi,%ss
23421+111:
23422+#endif
23423+ .endm
23424+
23425+ .macro pax_erase_kstack
23426+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23427+ call pax_erase_kstack
23428+#endif
23429+ .endm
23430+
23431+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23432+ENTRY(pax_erase_kstack)
23433+ pushq %rdi
23434+ pushq %rcx
23435+ pushq %rax
23436+ pushq %r11
23437+
23438+ GET_THREAD_INFO(%r11)
23439+ mov TI_lowest_stack(%r11), %rdi
23440+ mov $-0xBEEF, %rax
23441+ std
23442+
23443+1: mov %edi, %ecx
23444+ and $THREAD_SIZE_asm - 1, %ecx
23445+ shr $3, %ecx
23446+ repne scasq
23447+ jecxz 2f
23448+
23449+ cmp $2*8, %ecx
23450+ jc 2f
23451+
23452+ mov $2*8, %ecx
23453+ repe scasq
23454+ jecxz 2f
23455+ jne 1b
23456+
23457+2: cld
23458+ or $2*8, %rdi
23459+ mov %esp, %ecx
23460+ sub %edi, %ecx
23461+
23462+ cmp $THREAD_SIZE_asm, %rcx
23463+ jb 3f
23464+ ud2
23465+3:
23466+
23467+ shr $3, %ecx
23468+ rep stosq
23469+
23470+ mov TI_task_thread_sp0(%r11), %rdi
23471+ sub $256, %rdi
23472+ mov %rdi, TI_lowest_stack(%r11)
23473+
23474+ popq %r11
23475+ popq %rax
23476+ popq %rcx
23477+ popq %rdi
23478+ pax_force_retaddr
23479+ ret
23480+ENDPROC(pax_erase_kstack)
23481+#endif
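pax_erase_kstack is STACKLEAK's erase pass: starting from the deepest stack usage recorded in TI_lowest_stack it scans downward (std + repne scasq) for an existing run of the -0xBEEF poison (the assembly requires 16 consecutive quadwords) so already-cleaned regions are not rewritten on every return, sanity-checks that the range stays within THREAD_SIZE, fills everything from there up to the live %rsp with the poison (rep stosq), and finally resets lowest_stack to just below the top of the kernel stack. A rough C rendering with hypothetical helper names; the authoritative logic is the assembly above:

    #define KSTACK_POISON ((unsigned long)-0xBEEF)

    static void erase_kstack(struct thread_info *ti, unsigned long *live_sp)
    {
            unsigned long *p = (unsigned long *)ti->lowest_stack;

            /* walk down until a long run of poison is found, so stack
             * that was already erased is not rewritten every time */
            while (!poison_run(p, 16))      /* hypothetical helper */
                    p--;

            /* overwrite the once-used region up to the live stack pointer */
            while (p < live_sp)
                    *p++ = KSTACK_POISON;

            /* restart usage tracking near the top of the kernel stack */
            ti->lowest_stack = task_top_of_stack(ti) - 256; /* hypothetical helper */
    }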
23482
23483 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23484 #ifdef CONFIG_TRACE_IRQFLAGS
23485@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23486 .endm
23487
23488 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23489- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23490+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23491 jnc 1f
23492 TRACE_IRQS_ON_DEBUG
23493 1:
23494@@ -243,9 +670,52 @@ ENTRY(save_paranoid)
23495 js 1f /* negative -> in kernel */
23496 SWAPGS
23497 xorl %ebx,%ebx
23498-1: ret
23499+1:
23500+#ifdef CONFIG_PAX_MEMORY_UDEREF
23501+ testb $3, CS+8(%rsp)
23502+ jnz 1f
23503+ pax_enter_kernel
23504+ jmp 2f
23505+1: pax_enter_kernel_user
23506+2:
23507+#else
23508+ pax_enter_kernel
23509+#endif
23510+ pax_force_retaddr
23511+ ret
23512 CFI_ENDPROC
23513-END(save_paranoid)
23514+ENDPROC(save_paranoid)
23515+
23516+ENTRY(save_paranoid_nmi)
23517+ XCPT_FRAME 1 RDI+8
23518+ cld
23519+ movq_cfi rdi, RDI+8
23520+ movq_cfi rsi, RSI+8
23521+ movq_cfi rdx, RDX+8
23522+ movq_cfi rcx, RCX+8
23523+ movq_cfi rax, RAX+8
23524+ movq_cfi r8, R8+8
23525+ movq_cfi r9, R9+8
23526+ movq_cfi r10, R10+8
23527+ movq_cfi r11, R11+8
23528+ movq_cfi rbx, RBX+8
23529+ movq_cfi rbp, RBP+8
23530+ movq_cfi r12, R12+8
23531+ movq_cfi r13, R13+8
23532+ movq_cfi r14, R14+8
23533+ movq_cfi r15, R15+8
23534+ movl $1,%ebx
23535+ movl $MSR_GS_BASE,%ecx
23536+ rdmsr
23537+ testl %edx,%edx
23538+ js 1f /* negative -> in kernel */
23539+ SWAPGS
23540+ xorl %ebx,%ebx
23541+1: pax_enter_kernel_nmi
23542+ pax_force_retaddr
23543+ ret
23544+ CFI_ENDPROC
23545+ENDPROC(save_paranoid_nmi)
23546
23547 /*
23548 * A newly forked process directly context switches into this address.
23549@@ -266,7 +736,7 @@ ENTRY(ret_from_fork)
23550
23551 RESTORE_REST
23552
23553- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23554+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23555 jz 1f
23556
23557 /*
23558@@ -279,15 +749,13 @@ ENTRY(ret_from_fork)
23559 jmp int_ret_from_sys_call
23560
23561 1:
23562- subq $REST_SKIP, %rsp # leave space for volatiles
23563- CFI_ADJUST_CFA_OFFSET REST_SKIP
23564 movq %rbp, %rdi
23565 call *%rbx
23566 movl $0, RAX(%rsp)
23567 RESTORE_REST
23568 jmp int_ret_from_sys_call
23569 CFI_ENDPROC
23570-END(ret_from_fork)
23571+ENDPROC(ret_from_fork)
23572
23573 /*
23574 * System call entry. Up to 6 arguments in registers are supported.
23575@@ -324,7 +792,7 @@ END(ret_from_fork)
23576 ENTRY(system_call)
23577 CFI_STARTPROC simple
23578 CFI_SIGNAL_FRAME
23579- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23580+ CFI_DEF_CFA rsp,0
23581 CFI_REGISTER rip,rcx
23582 /*CFI_REGISTER rflags,r11*/
23583 SWAPGS_UNSAFE_STACK
23584@@ -337,16 +805,23 @@ GLOBAL(system_call_after_swapgs)
23585
23586 movq %rsp,PER_CPU_VAR(old_rsp)
23587 movq PER_CPU_VAR(kernel_stack),%rsp
23588+ SAVE_ARGS 8*6, 0, rax_enosys=1
23589+ pax_enter_kernel_user
23590+
23591+#ifdef CONFIG_PAX_RANDKSTACK
23592+ pax_erase_kstack
23593+#endif
23594+
23595 /*
23596 * No need to follow this irqs off/on section - it's straight
23597 * and short:
23598 */
23599 ENABLE_INTERRUPTS(CLBR_NONE)
23600- SAVE_ARGS 8, 0, rax_enosys=1
23601 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23602 movq %rcx,RIP-ARGOFFSET(%rsp)
23603 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23604- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23605+ GET_THREAD_INFO(%rcx)
23606+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23607 jnz tracesys
23608 system_call_fastpath:
23609 #if __SYSCALL_MASK == ~0
23610@@ -376,10 +851,13 @@ ret_from_sys_call:
23611 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
23612 * very bad.
23613 */
23614- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23615+ GET_THREAD_INFO(%rcx)
23616+ testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
23617 	jnz int_ret_from_sys_call_fixup	/* Go to the slow path */
23618
23619 CFI_REMEMBER_STATE
23620+ pax_exit_kernel_user
23621+ pax_erase_kstack
23622 /*
23623 * sysretq will re-enable interrupts:
23624 */
23625@@ -399,12 +877,15 @@ int_ret_from_sys_call_fixup:
23626
23627 /* Do syscall tracing */
23628 tracesys:
23629- leaq -REST_SKIP(%rsp), %rdi
23630+ movq %rsp, %rdi
23631 movq $AUDIT_ARCH_X86_64, %rsi
23632 call syscall_trace_enter_phase1
23633 test %rax, %rax
23634 jnz tracesys_phase2 /* if needed, run the slow path */
23635- LOAD_ARGS 0 /* else restore clobbered regs */
23636+
23637+ pax_erase_kstack
23638+
23639+ LOAD_ARGS /* else restore clobbered regs */
23640 jmp system_call_fastpath /* and return to the fast path */
23641
23642 tracesys_phase2:
23643@@ -415,12 +896,14 @@ tracesys_phase2:
23644 movq %rax,%rdx
23645 call syscall_trace_enter_phase2
23646
23647+ pax_erase_kstack
23648+
23649 /*
23650 * Reload arg registers from stack in case ptrace changed them.
23651 	 * We don't reload %rax because syscall_trace_enter_phase2() returned
23652 * the value it wants us to use in the table lookup.
23653 */
23654- LOAD_ARGS ARGOFFSET, 1
23655+ LOAD_ARGS 1
23656 RESTORE_REST
23657 #if __SYSCALL_MASK == ~0
23658 cmpq $__NR_syscall_max,%rax
23659@@ -451,7 +934,9 @@ GLOBAL(int_with_check)
23660 andl %edi,%edx
23661 jnz int_careful
23662 andl $~TS_COMPAT,TI_status(%rcx)
23663- jmp retint_swapgs
23664+ pax_exit_kernel_user
23665+ pax_erase_kstack
23666+ jmp retint_swapgs_pax
23667
23668 /* Either reschedule or signal or syscall exit tracking needed. */
23669 /* First do a reschedule test. */
23670@@ -497,7 +982,7 @@ int_restore_rest:
23671 TRACE_IRQS_OFF
23672 jmp int_with_check
23673 CFI_ENDPROC
23674-END(system_call)
23675+ENDPROC(system_call)
23676
23677 .macro FORK_LIKE func
23678 ENTRY(stub_\func)
23679@@ -510,9 +995,10 @@ ENTRY(stub_\func)
23680 DEFAULT_FRAME 0 8 /* offset 8: return address */
23681 call sys_\func
23682 RESTORE_TOP_OF_STACK %r11, 8
23683- ret $REST_SKIP /* pop extended registers */
23684+ pax_force_retaddr
23685+ ret
23686 CFI_ENDPROC
23687-END(stub_\func)
23688+ENDPROC(stub_\func)
23689 .endm
23690
23691 .macro FIXED_FRAME label,func
23692@@ -522,9 +1008,10 @@ ENTRY(\label)
23693 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23694 call \func
23695 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23696+ pax_force_retaddr
23697 ret
23698 CFI_ENDPROC
23699-END(\label)
23700+ENDPROC(\label)
23701 .endm
23702
23703 FORK_LIKE clone
23704@@ -543,7 +1030,7 @@ ENTRY(stub_execve)
23705 RESTORE_REST
23706 jmp int_ret_from_sys_call
23707 CFI_ENDPROC
23708-END(stub_execve)
23709+ENDPROC(stub_execve)
23710
23711 ENTRY(stub_execveat)
23712 CFI_STARTPROC
23713@@ -557,7 +1044,7 @@ ENTRY(stub_execveat)
23714 RESTORE_REST
23715 jmp int_ret_from_sys_call
23716 CFI_ENDPROC
23717-END(stub_execveat)
23718+ENDPROC(stub_execveat)
23719
23720 /*
23721 * sigreturn is special because it needs to restore all registers on return.
23722@@ -574,7 +1061,7 @@ ENTRY(stub_rt_sigreturn)
23723 RESTORE_REST
23724 jmp int_ret_from_sys_call
23725 CFI_ENDPROC
23726-END(stub_rt_sigreturn)
23727+ENDPROC(stub_rt_sigreturn)
23728
23729 #ifdef CONFIG_X86_X32_ABI
23730 ENTRY(stub_x32_rt_sigreturn)
23731@@ -588,7 +1075,7 @@ ENTRY(stub_x32_rt_sigreturn)
23732 RESTORE_REST
23733 jmp int_ret_from_sys_call
23734 CFI_ENDPROC
23735-END(stub_x32_rt_sigreturn)
23736+ENDPROC(stub_x32_rt_sigreturn)
23737
23738 ENTRY(stub_x32_execve)
23739 CFI_STARTPROC
23740@@ -602,7 +1089,7 @@ ENTRY(stub_x32_execve)
23741 RESTORE_REST
23742 jmp int_ret_from_sys_call
23743 CFI_ENDPROC
23744-END(stub_x32_execve)
23745+ENDPROC(stub_x32_execve)
23746
23747 ENTRY(stub_x32_execveat)
23748 CFI_STARTPROC
23749@@ -616,7 +1103,7 @@ ENTRY(stub_x32_execveat)
23750 RESTORE_REST
23751 jmp int_ret_from_sys_call
23752 CFI_ENDPROC
23753-END(stub_x32_execveat)
23754+ENDPROC(stub_x32_execveat)
23755
23756 #endif
23757
23758@@ -653,7 +1140,7 @@ vector=vector+1
23759 2: jmp common_interrupt
23760 .endr
23761 CFI_ENDPROC
23762-END(irq_entries_start)
23763+ENDPROC(irq_entries_start)
23764
23765 .previous
23766 END(interrupt)
23767@@ -670,28 +1157,29 @@ END(interrupt)
23768 /* 0(%rsp): ~(interrupt number) */
23769 .macro interrupt func
23770 /* reserve pt_regs for scratch regs and rbp */
23771- subq $ORIG_RAX-RBP, %rsp
23772- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23773+ subq $ORIG_RAX, %rsp
23774+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23775 cld
23776- /* start from rbp in pt_regs and jump over */
23777- movq_cfi rdi, (RDI-RBP)
23778- movq_cfi rsi, (RSI-RBP)
23779- movq_cfi rdx, (RDX-RBP)
23780- movq_cfi rcx, (RCX-RBP)
23781- movq_cfi rax, (RAX-RBP)
23782- movq_cfi r8, (R8-RBP)
23783- movq_cfi r9, (R9-RBP)
23784- movq_cfi r10, (R10-RBP)
23785- movq_cfi r11, (R11-RBP)
23786+ /* start from r15 in pt_regs and jump over */
23787+ movq_cfi rdi, RDI
23788+ movq_cfi rsi, RSI
23789+ movq_cfi rdx, RDX
23790+ movq_cfi rcx, RCX
23791+ movq_cfi rax, RAX
23792+ movq_cfi r8, R8
23793+ movq_cfi r9, R9
23794+ movq_cfi r10, R10
23795+ movq_cfi r11, R11
23796+ movq_cfi r12, R12
23797
23798 /* Save rbp so that we can unwind from get_irq_regs() */
23799- movq_cfi rbp, 0
23800+ movq_cfi rbp, RBP
23801
23802 /* Save previous stack value */
23803 movq %rsp, %rsi
23804
23805- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23806- testl $3, CS-RBP(%rsi)
23807+ movq %rsp,%rdi /* arg1 for handler */
23808+ testb $3, CS(%rsi)
23809 je 1f
23810 SWAPGS
23811 /*
23812@@ -711,6 +1199,18 @@ END(interrupt)
23813 0x06 /* DW_OP_deref */, \
23814 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23815 0x22 /* DW_OP_plus */
23816+
23817+#ifdef CONFIG_PAX_MEMORY_UDEREF
23818+ testb $3, CS(%rdi)
23819+ jnz 1f
23820+ pax_enter_kernel
23821+ jmp 2f
23822+1: pax_enter_kernel_user
23823+2:
23824+#else
23825+ pax_enter_kernel
23826+#endif
23827+
23828 /* We entered an interrupt context - irqs are off: */
23829 TRACE_IRQS_OFF
23830
23831@@ -735,14 +1235,14 @@ ret_from_intr:
23832
23833 /* Restore saved previous stack */
23834 popq %rsi
23835- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23836- leaq ARGOFFSET-RBP(%rsi), %rsp
23837+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23838+ movq %rsi, %rsp
23839 CFI_DEF_CFA_REGISTER rsp
23840- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23841+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23842
23843 exit_intr:
23844 GET_THREAD_INFO(%rcx)
23845- testl $3,CS-ARGOFFSET(%rsp)
23846+ testb $3,CS-ARGOFFSET(%rsp)
23847 je retint_kernel
23848
23849 /* Interrupt came from user space */
23850@@ -764,14 +1264,16 @@ retint_swapgs: /* return to user-space */
23851 * The iretq could re-enable interrupts:
23852 */
23853 DISABLE_INTERRUPTS(CLBR_ANY)
23854+ pax_exit_kernel_user
23855+retint_swapgs_pax:
23856 TRACE_IRQS_IRETQ
23857
23858 /*
23859 * Try to use SYSRET instead of IRET if we're returning to
23860 * a completely clean 64-bit userspace context.
23861 */
23862- movq (RCX-R11)(%rsp), %rcx
23863- cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
23864+ movq (RCX-ARGOFFSET)(%rsp), %rcx
23865+ cmpq %rcx,(RIP-ARGOFFSET)(%rsp) /* RCX == RIP */
23866 jne opportunistic_sysret_failed
23867
23868 /*
23869@@ -792,7 +1294,7 @@ retint_swapgs: /* return to user-space */
23870 shr $__VIRTUAL_MASK_SHIFT, %rcx
23871 jnz opportunistic_sysret_failed
23872
23873- cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
23874+ cmpq $__USER_CS,(CS-ARGOFFSET)(%rsp) /* CS must match SYSRET */
23875 jne opportunistic_sysret_failed
23876
23877 movq (R11-ARGOFFSET)(%rsp), %r11
23878@@ -838,6 +1340,27 @@ opportunistic_sysret_failed:
23879
23880 retint_restore_args: /* return to kernel space */
23881 DISABLE_INTERRUPTS(CLBR_ANY)
23882+ pax_exit_kernel
23883+
23884+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23885+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23886+	 * namely calling EFI runtime services with a phys mapping. We start
23887+	 * off with NOPs and patch in the real instrumentation (BTS/OR)
23888+	 * before starting any userland process, even before bringing up
23889+	 * the APs.
23890+ */
23891+ .pushsection .altinstr_replacement, "a"
23892+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23893+ 602:
23894+ .popsection
23895+ 603: .fill 602b-601b, 1, 0x90
23896+ .pushsection .altinstructions, "a"
23897+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23898+ .popsection
23899+#else
23900+ pax_force_retaddr (RIP-ARGOFFSET)
23901+#endif
23902+
23903 /*
23904 * The iretq could re-enable interrupts:
23905 */
23906@@ -875,15 +1398,15 @@ native_irq_return_ldt:
23907 SWAPGS
23908 movq PER_CPU_VAR(espfix_waddr),%rdi
23909 movq %rax,(0*8)(%rdi) /* RAX */
23910- movq (2*8)(%rsp),%rax /* RIP */
23911+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23912 movq %rax,(1*8)(%rdi)
23913- movq (3*8)(%rsp),%rax /* CS */
23914+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23915 movq %rax,(2*8)(%rdi)
23916- movq (4*8)(%rsp),%rax /* RFLAGS */
23917+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23918 movq %rax,(3*8)(%rdi)
23919- movq (6*8)(%rsp),%rax /* SS */
23920+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23921 movq %rax,(5*8)(%rdi)
23922- movq (5*8)(%rsp),%rax /* RSP */
23923+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23924 movq %rax,(4*8)(%rdi)
23925 andl $0xffff0000,%eax
23926 popq_cfi %rdi
23927@@ -937,7 +1460,7 @@ ENTRY(retint_kernel)
23928 jmp exit_intr
23929 #endif
23930 CFI_ENDPROC
23931-END(common_interrupt)
23932+ENDPROC(common_interrupt)
23933
23934 /*
23935 * APIC interrupts.
23936@@ -951,7 +1474,7 @@ ENTRY(\sym)
23937 interrupt \do_sym
23938 jmp ret_from_intr
23939 CFI_ENDPROC
23940-END(\sym)
23941+ENDPROC(\sym)
23942 .endm
23943
23944 #ifdef CONFIG_TRACING
23945@@ -1024,7 +1547,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23946 /*
23947 * Exception entry points.
23948 */
23949-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23950+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23951
23952 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23953 ENTRY(\sym)
23954@@ -1080,6 +1603,12 @@ ENTRY(\sym)
23955 .endif
23956
23957 .if \shift_ist != -1
23958+#ifdef CONFIG_SMP
23959+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23960+ lea init_tss(%r13), %r13
23961+#else
23962+ lea init_tss(%rip), %r13
23963+#endif
23964 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
23965 .endif
23966
23967@@ -1126,7 +1655,7 @@ ENTRY(\sym)
23968 .endif
23969
23970 CFI_ENDPROC
23971-END(\sym)
23972+ENDPROC(\sym)
23973 .endm
23974
23975 #ifdef CONFIG_TRACING
23976@@ -1167,9 +1696,10 @@ gs_change:
23977 2: mfence /* workaround */
23978 SWAPGS
23979 popfq_cfi
23980+ pax_force_retaddr
23981 ret
23982 CFI_ENDPROC
23983-END(native_load_gs_index)
23984+ENDPROC(native_load_gs_index)
23985
23986 _ASM_EXTABLE(gs_change,bad_gs)
23987 .section .fixup,"ax"
23988@@ -1197,9 +1727,10 @@ ENTRY(do_softirq_own_stack)
23989 CFI_DEF_CFA_REGISTER rsp
23990 CFI_ADJUST_CFA_OFFSET -8
23991 decl PER_CPU_VAR(irq_count)
23992+ pax_force_retaddr
23993 ret
23994 CFI_ENDPROC
23995-END(do_softirq_own_stack)
23996+ENDPROC(do_softirq_own_stack)
23997
23998 #ifdef CONFIG_XEN
23999 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24000@@ -1240,7 +1771,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24001 #endif
24002 jmp error_exit
24003 CFI_ENDPROC
24004-END(xen_do_hypervisor_callback)
24005+ENDPROC(xen_do_hypervisor_callback)
24006
24007 /*
24008 * Hypervisor uses this for application faults while it executes.
24009@@ -1299,7 +1830,7 @@ ENTRY(xen_failsafe_callback)
24010 SAVE_ALL
24011 jmp error_exit
24012 CFI_ENDPROC
24013-END(xen_failsafe_callback)
24014+ENDPROC(xen_failsafe_callback)
24015
24016 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24017 xen_hvm_callback_vector xen_evtchn_do_upcall
24018@@ -1344,18 +1875,25 @@ ENTRY(paranoid_exit)
24019 DEFAULT_FRAME
24020 DISABLE_INTERRUPTS(CLBR_NONE)
24021 TRACE_IRQS_OFF_DEBUG
24022- testl %ebx,%ebx /* swapgs needed? */
24023+ testl $1,%ebx /* swapgs needed? */
24024 jnz paranoid_restore
24025+#ifdef CONFIG_PAX_MEMORY_UDEREF
24026+ pax_exit_kernel_user
24027+#else
24028+ pax_exit_kernel
24029+#endif
24030 TRACE_IRQS_IRETQ 0
24031 SWAPGS_UNSAFE_STACK
24032 RESTORE_ALL 8
24033 INTERRUPT_RETURN
24034 paranoid_restore:
24035+ pax_exit_kernel
24036 TRACE_IRQS_IRETQ_DEBUG 0
24037 RESTORE_ALL 8
24038+ pax_force_retaddr_bts
24039 INTERRUPT_RETURN
24040 CFI_ENDPROC
24041-END(paranoid_exit)
24042+ENDPROC(paranoid_exit)
24043
24044 /*
24045 * Exception entry point. This expects an error code/orig_rax on the stack.
24046@@ -1382,12 +1920,23 @@ ENTRY(error_entry)
24047 movq %r14, R14+8(%rsp)
24048 movq %r15, R15+8(%rsp)
24049 xorl %ebx,%ebx
24050- testl $3,CS+8(%rsp)
24051+ testb $3,CS+8(%rsp)
24052 je error_kernelspace
24053 error_swapgs:
24054 SWAPGS
24055 error_sti:
24056+#ifdef CONFIG_PAX_MEMORY_UDEREF
24057+ testb $3, CS+8(%rsp)
24058+ jnz 1f
24059+ pax_enter_kernel
24060+ jmp 2f
24061+1: pax_enter_kernel_user
24062+2:
24063+#else
24064+ pax_enter_kernel
24065+#endif
24066 TRACE_IRQS_OFF
24067+ pax_force_retaddr
24068 ret
24069
24070 /*
24071@@ -1422,7 +1971,7 @@ error_bad_iret:
24072 decl %ebx /* Return to usergs */
24073 jmp error_sti
24074 CFI_ENDPROC
24075-END(error_entry)
24076+ENDPROC(error_entry)
24077
24078
24079 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24080@@ -1433,7 +1982,7 @@ ENTRY(error_exit)
24081 DISABLE_INTERRUPTS(CLBR_NONE)
24082 TRACE_IRQS_OFF
24083 GET_THREAD_INFO(%rcx)
24084- testl %eax,%eax
24085+ testl $1,%eax
24086 jne retint_kernel
24087 LOCKDEP_SYS_EXIT_IRQ
24088 movl TI_flags(%rcx),%edx
24089@@ -1442,7 +1991,7 @@ ENTRY(error_exit)
24090 jnz retint_careful
24091 jmp retint_swapgs
24092 CFI_ENDPROC
24093-END(error_exit)
24094+ENDPROC(error_exit)
24095
24096 /*
24097 * Test if a given stack is an NMI stack or not.
24098@@ -1500,9 +2049,11 @@ ENTRY(nmi)
24099 * If %cs was not the kernel segment, then the NMI triggered in user
24100 * space, which means it is definitely not nested.
24101 */
24102+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24103+ je 1f
24104 cmpl $__KERNEL_CS, 16(%rsp)
24105 jne first_nmi
24106-
24107+1:
24108 /*
24109 * Check the special variable on the stack to see if NMIs are
24110 * executing.
24111@@ -1536,8 +2087,7 @@ nested_nmi:
24112
24113 1:
24114 	/* Set up the interrupted NMI's stack to jump to repeat_nmi */
24115- leaq -1*8(%rsp), %rdx
24116- movq %rdx, %rsp
24117+ subq $8, %rsp
24118 CFI_ADJUST_CFA_OFFSET 1*8
24119 leaq -10*8(%rsp), %rdx
24120 pushq_cfi $__KERNEL_DS
24121@@ -1555,6 +2105,7 @@ nested_nmi_out:
24122 CFI_RESTORE rdx
24123
24124 /* No need to check faults here */
24125+# pax_force_retaddr_bts
24126 INTERRUPT_RETURN
24127
24128 CFI_RESTORE_STATE
24129@@ -1651,13 +2202,13 @@ end_repeat_nmi:
24130 subq $ORIG_RAX-R15, %rsp
24131 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24132 /*
24133- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24134+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24135 	 * as we should not be calling schedule in NMI context,
24136 	 * even with normal interrupts enabled. An NMI should not be
24137 * setting NEED_RESCHED or anything that normal interrupts and
24138 * exceptions might do.
24139 */
24140- call save_paranoid
24141+ call save_paranoid_nmi
24142 DEFAULT_FRAME 0
24143
24144 /*
24145@@ -1667,9 +2218,9 @@ end_repeat_nmi:
24146 * NMI itself takes a page fault, the page fault that was preempted
24147 * will read the information from the NMI page fault and not the
24148 	 * original fault. Save it off and restore it if it changes.
24149- * Use the r12 callee-saved register.
24150+ * Use the r13 callee-saved register.
24151 */
24152- movq %cr2, %r12
24153+ movq %cr2, %r13
24154
24155 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24156 movq %rsp,%rdi
24157@@ -1678,29 +2229,34 @@ end_repeat_nmi:
24158
24159 /* Did the NMI take a page fault? Restore cr2 if it did */
24160 movq %cr2, %rcx
24161- cmpq %rcx, %r12
24162+ cmpq %rcx, %r13
24163 je 1f
24164- movq %r12, %cr2
24165+ movq %r13, %cr2
24166 1:
24167
24168- testl %ebx,%ebx /* swapgs needed? */
24169+ testl $1,%ebx /* swapgs needed? */
24170 jnz nmi_restore
24171 nmi_swapgs:
24172 SWAPGS_UNSAFE_STACK
24173 nmi_restore:
24174+ pax_exit_kernel_nmi
24175 /* Pop the extra iret frame at once */
24176 RESTORE_ALL 6*8
24177+ testb $3, 8(%rsp)
24178+ jnz 1f
24179+ pax_force_retaddr_bts
24180+1:
24181
24182 /* Clear the NMI executing stack variable */
24183 movq $0, 5*8(%rsp)
24184 jmp irq_return
24185 CFI_ENDPROC
24186-END(nmi)
24187+ENDPROC(nmi)
24188
24189 ENTRY(ignore_sysret)
24190 CFI_STARTPROC
24191 mov $-ENOSYS,%eax
24192 sysret
24193 CFI_ENDPROC
24194-END(ignore_sysret)
24195+ENDPROC(ignore_sysret)
24196
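One pattern worth calling out from the hunks above: the saved CS selector's low two bits hold the privilege level of the interrupted context (0 = kernel, 3 = user), which is why the patch can shorten several testl $3 instructions to testb $3 — only the low byte of the selector matters. In C terms:

    /* The CS selector's low two bits are the RPL/CPL. */
    static inline int came_from_user_mode(struct pt_regs *regs)
    {
            return (regs->cs & 3) != 0;     /* 3 == user mode */
    }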
24197diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24198index f5d0730..5bce89c 100644
24199--- a/arch/x86/kernel/espfix_64.c
24200+++ b/arch/x86/kernel/espfix_64.c
24201@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24202 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24203 static void *espfix_pages[ESPFIX_MAX_PAGES];
24204
24205-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24206- __aligned(PAGE_SIZE);
24207+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24208
24209 static unsigned int page_random, slot_random;
24210
24211@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24212 void __init init_espfix_bsp(void)
24213 {
24214 pgd_t *pgd_p;
24215+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24216
24217 /* Install the espfix pud into the kernel page directory */
24218- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24219+ pgd_p = &init_level4_pgt[index];
24220 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24221
24222+#ifdef CONFIG_PAX_PER_CPU_PGD
24223+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24224+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24225+#endif
24226+
24227 /* Randomize the locations */
24228 init_espfix_random();
24229
24230@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24231 set_pte(&pte_p[n*PTE_STRIDE], pte);
24232
24233 /* Job is done for this CPU and any CPU which shares this page */
24234- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24235+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24236
24237 unlock_done:
24238 mutex_unlock(&espfix_init_mutex);
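With CONFIG_PAX_PER_CPU_PGD every CPU runs on its own kernel and user page-global directories, so a PGD entry installed only in init_level4_pgt/swapper_pg_dir would be invisible to running CPUs; hence the extra clone_pgd_range() calls copying the espfix slot into both per-CPU variants. clone_pgd_range() is the stock helper from <asm/pgtable.h> and is just a memcpy over PGD slots:

    static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
    {
            memcpy(dst, src, count * sizeof(pgd_t));
    }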
24239diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24240index 8b7b0a5..2395f29 100644
24241--- a/arch/x86/kernel/ftrace.c
24242+++ b/arch/x86/kernel/ftrace.c
24243@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24244 * kernel identity mapping to modify code.
24245 */
24246 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24247- ip = (unsigned long)__va(__pa_symbol(ip));
24248+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24249
24250 return ip;
24251 }
24252@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24253 {
24254 unsigned char replaced[MCOUNT_INSN_SIZE];
24255
24256+ ip = ktla_ktva(ip);
24257+
24258 /*
24259 * Note: Due to modules and __init, code can
24260 * disappear and change, we need to protect against faulting
24261@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24262 unsigned char old[MCOUNT_INSN_SIZE];
24263 int ret;
24264
24265- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24266+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24267
24268 ftrace_update_func = ip;
24269 /* Make sure the breakpoints see the ftrace_update_func update */
24270@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24271 unsigned char replaced[MCOUNT_INSN_SIZE];
24272 unsigned char brk = BREAKPOINT_INSTRUCTION;
24273
24274- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24275+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24276 return -EFAULT;
24277
24278 /* Make sure it is what we expect it to be */
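The recurring ktla_ktva() ("kernel text linear address to kernel text virtual address") wrappers exist because under KERNEXEC the executable mapping of kernel text and the alias used for reading and patching code can live at different addresses, so ftrace must translate before touching instruction bytes. A conceptual sketch only — the real PaX definition is arch- and config-specific, and degenerates to the identity on configurations without the split:

    /* Conceptual: shift a text address to where the same bytes are
     * visible for reading/patching. __KERNEL_TEXT_OFFSET is
     * illustrative here, not a claim about the actual definition. */
    static inline unsigned long ktla_ktva(unsigned long addr)
    {
            return addr + __KERNEL_TEXT_OFFSET;
    }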
24279diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24280index c4f8d46..2d63ae2 100644
24281--- a/arch/x86/kernel/head64.c
24282+++ b/arch/x86/kernel/head64.c
24283@@ -68,12 +68,12 @@ again:
24284 pgd = *pgd_p;
24285
24286 /*
24287- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24288- * critical -- __PAGE_OFFSET would point us back into the dynamic
24289+ * The use of __early_va rather than __va here is critical:
24290+ * __va would point us back into the dynamic
24291 * range and we might end up looping forever...
24292 */
24293 if (pgd)
24294- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24295+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24296 else {
24297 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24298 reset_early_page_tables();
24299@@ -83,13 +83,13 @@ again:
24300 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24301 for (i = 0; i < PTRS_PER_PUD; i++)
24302 pud_p[i] = 0;
24303- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24304+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24305 }
24306 pud_p += pud_index(address);
24307 pud = *pud_p;
24308
24309 if (pud)
24310- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24311+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24312 else {
24313 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24314 reset_early_page_tables();
24315@@ -99,7 +99,7 @@ again:
24316 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24317 for (i = 0; i < PTRS_PER_PMD; i++)
24318 pmd_p[i] = 0;
24319- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24320+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24321 }
24322 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24323 pmd_p[pmd_index(address)] = pmd;
24324@@ -180,7 +180,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24325 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24326 early_printk("Kernel alive\n");
24327
24328- clear_page(init_level4_pgt);
24329 /* set init_level4_pgt kernel high mapping*/
24330 init_level4_pgt[511] = early_level4_pgt[511];
24331
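The head64.c hunks replace open-coded physical-to-virtual arithmetic with __early_va(); from the expressions being removed, the macro presumably expands to exactly that computation — a translation that works before the full direct map is set up:

    /* Reconstructed from the replaced expressions; not a verbatim copy. */
    #define __early_va(x) \
            ((void *)((unsigned long)(x) + __START_KERNEL_map - phys_base))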
24332diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24333index f36bd42..0ab4474 100644
24334--- a/arch/x86/kernel/head_32.S
24335+++ b/arch/x86/kernel/head_32.S
24336@@ -26,6 +26,12 @@
24337 /* Physical address */
24338 #define pa(X) ((X) - __PAGE_OFFSET)
24339
24340+#ifdef CONFIG_PAX_KERNEXEC
24341+#define ta(X) (X)
24342+#else
24343+#define ta(X) ((X) - __PAGE_OFFSET)
24344+#endif
24345+
24346 /*
24347 * References to members of the new_cpu_data structure.
24348 */
24349@@ -55,11 +61,7 @@
24350  * and smaller than max_low_pfn, otherwise some page table entries will be wasted
24351 */
24352
24353-#if PTRS_PER_PMD > 1
24354-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24355-#else
24356-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24357-#endif
24358+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24359
24360 /* Number of possible pages in the lowmem region */
24361 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24362@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24363 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24364
24365 /*
24366+ * Real beginning of normal "text" segment
24367+ */
24368+ENTRY(stext)
24369+ENTRY(_stext)
24370+
24371+/*
24372 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24373 * %esi points to the real-mode code as a 32-bit pointer.
24374 * CS and DS must be 4 GB flat segments, but we don't depend on
24375@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24376 * can.
24377 */
24378 __HEAD
24379+
24380+#ifdef CONFIG_PAX_KERNEXEC
24381+ jmp startup_32
24382+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24383+.fill PAGE_SIZE-5,1,0xcc
24384+#endif
24385+
24386 ENTRY(startup_32)
24387 movl pa(stack_start),%ecx
24388
24389@@ -106,6 +121,59 @@ ENTRY(startup_32)
24390 2:
24391 leal -__PAGE_OFFSET(%ecx),%esp
24392
24393+#ifdef CONFIG_SMP
24394+ movl $pa(cpu_gdt_table),%edi
24395+ movl $__per_cpu_load,%eax
24396+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24397+ rorl $16,%eax
24398+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24399+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24400+ movl $__per_cpu_end - 1,%eax
24401+ subl $__per_cpu_start,%eax
24402+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24403+#endif
24404+
24405+#ifdef CONFIG_PAX_MEMORY_UDEREF
24406+ movl $NR_CPUS,%ecx
24407+ movl $pa(cpu_gdt_table),%edi
24408+1:
24409+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24410+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24411+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24412+ addl $PAGE_SIZE_asm,%edi
24413+ loop 1b
24414+#endif
24415+
24416+#ifdef CONFIG_PAX_KERNEXEC
24417+ movl $pa(boot_gdt),%edi
24418+ movl $__LOAD_PHYSICAL_ADDR,%eax
24419+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24420+ rorl $16,%eax
24421+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24422+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24423+ rorl $16,%eax
24424+
24425+ ljmp $(__BOOT_CS),$1f
24426+1:
24427+
24428+ movl $NR_CPUS,%ecx
24429+ movl $pa(cpu_gdt_table),%edi
24430+ addl $__PAGE_OFFSET,%eax
24431+1:
24432+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24433+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24434+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24435+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24436+ rorl $16,%eax
24437+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24438+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24439+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24440+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24441+ rorl $16,%eax
24442+ addl $PAGE_SIZE_asm,%edi
24443+ loop 1b
24444+#endif
24445+
24446 /*
24447 * Clear BSS first so that there are no surprises...
24448 */
24449@@ -201,8 +269,11 @@ ENTRY(startup_32)
24450 movl %eax, pa(max_pfn_mapped)
24451
24452 /* Do early initialization of the fixmap area */
24453- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24454- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24455+#ifdef CONFIG_COMPAT_VDSO
24456+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24457+#else
24458+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24459+#endif
24460 #else /* Not PAE */
24461
24462 page_pde_offset = (__PAGE_OFFSET >> 20);
24463@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24464 movl %eax, pa(max_pfn_mapped)
24465
24466 /* Do early initialization of the fixmap area */
24467- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24468- movl %eax,pa(initial_page_table+0xffc)
24469+#ifdef CONFIG_COMPAT_VDSO
24470+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24471+#else
24472+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24473+#endif
24474 #endif
24475
24476 #ifdef CONFIG_PARAVIRT
24477@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24478 cmpl $num_subarch_entries, %eax
24479 jae bad_subarch
24480
24481- movl pa(subarch_entries)(,%eax,4), %eax
24482- subl $__PAGE_OFFSET, %eax
24483- jmp *%eax
24484+ jmp *pa(subarch_entries)(,%eax,4)
24485
24486 bad_subarch:
24487 WEAK(lguest_entry)
24488@@ -261,10 +333,10 @@ WEAK(xen_entry)
24489 __INITDATA
24490
24491 subarch_entries:
24492- .long default_entry /* normal x86/PC */
24493- .long lguest_entry /* lguest hypervisor */
24494- .long xen_entry /* Xen hypervisor */
24495- .long default_entry /* Moorestown MID */
24496+ .long ta(default_entry) /* normal x86/PC */
24497+ .long ta(lguest_entry) /* lguest hypervisor */
24498+ .long ta(xen_entry) /* Xen hypervisor */
24499+ .long ta(default_entry) /* Moorestown MID */
24500 num_subarch_entries = (. - subarch_entries) / 4
24501 .previous
24502 #else
24503@@ -354,6 +426,7 @@ default_entry:
24504 movl pa(mmu_cr4_features),%eax
24505 movl %eax,%cr4
24506
24507+#ifdef CONFIG_X86_PAE
24508 testb $X86_CR4_PAE, %al # check if PAE is enabled
24509 jz enable_paging
24510
24511@@ -382,6 +455,9 @@ default_entry:
24512 /* Make changes effective */
24513 wrmsr
24514
24515+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24516+#endif
24517+
24518 enable_paging:
24519
24520 /*
24521@@ -449,14 +525,20 @@ is486:
24522 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24523 movl %eax,%ss # after changing gdt.
24524
24525- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24526+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24527 movl %eax,%ds
24528 movl %eax,%es
24529
24530 movl $(__KERNEL_PERCPU), %eax
24531 movl %eax,%fs # set this cpu's percpu
24532
24533+#ifdef CONFIG_CC_STACKPROTECTOR
24534 movl $(__KERNEL_STACK_CANARY),%eax
24535+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24536+ movl $(__USER_DS),%eax
24537+#else
24538+ xorl %eax,%eax
24539+#endif
24540 movl %eax,%gs
24541
24542 xorl %eax,%eax # Clear LDT
24543@@ -512,8 +594,11 @@ setup_once:
24544 * relocation. Manually set base address in stack canary
24545 * segment descriptor.
24546 */
24547- movl $gdt_page,%eax
24548+ movl $cpu_gdt_table,%eax
24549 movl $stack_canary,%ecx
24550+#ifdef CONFIG_SMP
24551+ addl $__per_cpu_load,%ecx
24552+#endif
24553 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24554 shrl $16, %ecx
24555 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24556@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24557 cmpl $2,(%esp) # X86_TRAP_NMI
24558 je is_nmi # Ignore NMI
24559
24560- cmpl $2,%ss:early_recursion_flag
24561+ cmpl $1,%ss:early_recursion_flag
24562 je hlt_loop
24563 incl %ss:early_recursion_flag
24564
24565@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24566 pushl (20+6*4)(%esp) /* trapno */
24567 pushl $fault_msg
24568 call printk
24569-#endif
24570 call dump_stack
24571+#endif
24572 hlt_loop:
24573 hlt
24574 jmp hlt_loop
24575@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24576 /* This is the default interrupt "handler" :-) */
24577 ALIGN
24578 ignore_int:
24579- cld
24580 #ifdef CONFIG_PRINTK
24581+ cmpl $2,%ss:early_recursion_flag
24582+ je hlt_loop
24583+ incl %ss:early_recursion_flag
24584+ cld
24585 pushl %eax
24586 pushl %ecx
24587 pushl %edx
24588@@ -617,9 +705,6 @@ ignore_int:
24589 movl $(__KERNEL_DS),%eax
24590 movl %eax,%ds
24591 movl %eax,%es
24592- cmpl $2,early_recursion_flag
24593- je hlt_loop
24594- incl early_recursion_flag
24595 pushl 16(%esp)
24596 pushl 24(%esp)
24597 pushl 32(%esp)
24598@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24599 /*
24600 * BSS section
24601 */
24602-__PAGE_ALIGNED_BSS
24603- .align PAGE_SIZE
24604 #ifdef CONFIG_X86_PAE
24605+.section .initial_pg_pmd,"a",@progbits
24606 initial_pg_pmd:
24607 .fill 1024*KPMDS,4,0
24608 #else
24609+.section .initial_page_table,"a",@progbits
24610 ENTRY(initial_page_table)
24611 .fill 1024,4,0
24612 #endif
24613+.section .initial_pg_fixmap,"a",@progbits
24614 initial_pg_fixmap:
24615 .fill 1024,4,0
24616+.section .empty_zero_page,"a",@progbits
24617 ENTRY(empty_zero_page)
24618 .fill 4096,1,0
24619+.section .swapper_pg_dir,"a",@progbits
24620 ENTRY(swapper_pg_dir)
24621+#ifdef CONFIG_X86_PAE
24622+ .fill 4,8,0
24623+#else
24624 .fill 1024,4,0
24625+#endif
24626
24627 /*
24628 * This starts the data section.
24629 */
24630 #ifdef CONFIG_X86_PAE
24631-__PAGE_ALIGNED_DATA
24632- /* Page-aligned for the benefit of paravirt? */
24633- .align PAGE_SIZE
24634+.section .initial_page_table,"a",@progbits
24635 ENTRY(initial_page_table)
24636 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24637 # if KPMDS == 3
24638@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24639 # error "Kernel PMDs should be 1, 2 or 3"
24640 # endif
24641 .align PAGE_SIZE /* needs to be page-sized too */
24642+
24643+#ifdef CONFIG_PAX_PER_CPU_PGD
24644+ENTRY(cpu_pgd)
24645+ .rept 2*NR_CPUS
24646+ .fill 4,8,0
24647+ .endr
24648+#endif
24649+
24650 #endif
24651
24652 .data
24653 .balign 4
24654 ENTRY(stack_start)
24655- .long init_thread_union+THREAD_SIZE
24656+ .long init_thread_union+THREAD_SIZE-8
24657
24658 __INITRODATA
24659 int_msg:
24660@@ -727,7 +825,7 @@ fault_msg:
24661 * segment size, and 32-bit linear address value:
24662 */
24663
24664- .data
24665+.section .rodata,"a",@progbits
24666 .globl boot_gdt_descr
24667 .globl idt_descr
24668
24669@@ -736,7 +834,7 @@ fault_msg:
24670 .word 0 # 32 bit align gdt_desc.address
24671 boot_gdt_descr:
24672 .word __BOOT_DS+7
24673- .long boot_gdt - __PAGE_OFFSET
24674+ .long pa(boot_gdt)
24675
24676 .word 0 # 32-bit align idt_desc.address
24677 idt_descr:
24678@@ -747,7 +845,7 @@ idt_descr:
24679 .word 0 # 32 bit align gdt_desc.address
24680 ENTRY(early_gdt_descr)
24681 .word GDT_ENTRIES*8-1
24682- .long gdt_page /* Overwritten for secondary CPUs */
24683+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24684
24685 /*
24686 * The boot_gdt must mirror the equivalent in setup.S and is
24687@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24688 .align L1_CACHE_BYTES
24689 ENTRY(boot_gdt)
24690 .fill GDT_ENTRY_BOOT_CS,8,0
24691- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24692- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24693+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24694+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24695+
24696+ .align PAGE_SIZE_asm
24697+ENTRY(cpu_gdt_table)
24698+ .rept NR_CPUS
24699+ .quad 0x0000000000000000 /* NULL descriptor */
24700+ .quad 0x0000000000000000 /* 0x0b reserved */
24701+ .quad 0x0000000000000000 /* 0x13 reserved */
24702+ .quad 0x0000000000000000 /* 0x1b reserved */
24703+
24704+#ifdef CONFIG_PAX_KERNEXEC
24705+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24706+#else
24707+ .quad 0x0000000000000000 /* 0x20 unused */
24708+#endif
24709+
24710+ .quad 0x0000000000000000 /* 0x28 unused */
24711+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24712+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24713+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24714+ .quad 0x0000000000000000 /* 0x4b reserved */
24715+ .quad 0x0000000000000000 /* 0x53 reserved */
24716+ .quad 0x0000000000000000 /* 0x5b reserved */
24717+
24718+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24719+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24720+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24721+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24722+
24723+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24724+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24725+
24726+ /*
24727+ * Segments used for calling PnP BIOS have byte granularity.
24728+ * The code segments and data segments have fixed 64k limits,
24729+ * the transfer segment sizes are set at run time.
24730+ */
24731+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24732+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24733+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24734+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24735+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24736+
24737+ /*
24738+ * The APM segments have byte granularity and their bases
24739+ * are set at run time. All have 64k limits.
24740+ */
24741+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24742+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24743+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24744+
24745+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24746+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24747+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24748+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24749+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24750+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24751+
24752+ /* Be sure this is zeroed to avoid false validations in Xen */
24753+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24754+ .endr
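The cpu_gdt_table entries are raw 8-byte segment descriptors, and decoding one makes the 0x9a -> 0x9b (and 0x92 -> 0x93) changes legible: bit 0 of the access byte is the "accessed" flag, and pre-setting it means the CPU never has to write it back — which matters once the GDT lives in read-only memory. A worked decode of 0x00cf9b000000ffff:

    limit[15:0]  = 0xffff    base[15:0]  = 0x0000
    base[23:16]  = 0x00      access byte = 0x9b  (present, DPL 0, code, readable, accessed)
    limit[19:16] = 0xf       flags       = 0xc   (G=1: 4 KiB granularity, D=1: 32-bit)
    base[31:24]  = 0x00      => a flat 4 GiB ring-0 code segment at base 0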
24755diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24756index 6fd514d9..320367e 100644
24757--- a/arch/x86/kernel/head_64.S
24758+++ b/arch/x86/kernel/head_64.S
24759@@ -20,6 +20,8 @@
24760 #include <asm/processor-flags.h>
24761 #include <asm/percpu.h>
24762 #include <asm/nops.h>
24763+#include <asm/cpufeature.h>
24764+#include <asm/alternative-asm.h>
24765
24766 #ifdef CONFIG_PARAVIRT
24767 #include <asm/asm-offsets.h>
24768@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24769 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24770 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24771 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24772+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24773+L3_VMALLOC_START = pud_index(VMALLOC_START)
24774+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24775+L3_VMALLOC_END = pud_index(VMALLOC_END)
24776+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24777+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24778
24779 .text
24780 __HEAD
24781@@ -89,11 +97,26 @@ startup_64:
24782 * Fixup the physical addresses in the page table
24783 */
24784 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24785+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24786+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24787+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24788+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24789+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24790
24791- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24792- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24793+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24794+#ifndef CONFIG_XEN
24795+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24796+#endif
24797
24798+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24799+
24800+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24801+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24802+
24803+ addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
24804+ addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
24805 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24806+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24807
24808 /*
24809 * Set up the identity mapping for the switchover. These
24810@@ -174,11 +197,12 @@ ENTRY(secondary_startup_64)
24811 * after the boot processor executes this code.
24812 */
24813
24814+ orq $-1, %rbp
24815 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24816 1:
24817
24818- /* Enable PAE mode and PGE */
24819- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24820+ /* Enable PAE mode and PSE/PGE */
24821+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24822 movq %rcx, %cr4
24823
24824 /* Setup early boot stage 4 level pagetables. */
24825@@ -199,10 +223,21 @@ ENTRY(secondary_startup_64)
24826 movl $MSR_EFER, %ecx
24827 rdmsr
24828 btsl $_EFER_SCE, %eax /* Enable System Call */
24829- btl $20,%edi /* No Execute supported? */
24830+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24831 jnc 1f
24832 btsl $_EFER_NX, %eax
24833+ cmpq $-1, %rbp
24834+ je 1f
24835 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24836+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24837+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24838+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24839+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24840+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
24841+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
24842+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24843+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24844+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24845 1: wrmsr /* Make changes effective */
24846
24847 /* Setup cr0 */
24848@@ -282,6 +317,7 @@ ENTRY(secondary_startup_64)
24849 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24850 * address given in m16:64.
24851 */
24852+ pax_set_fptr_mask
24853 movq initial_code(%rip),%rax
24854 pushq $0 # fake return address to stop unwinder
24855 pushq $__KERNEL_CS # set correct cs
24856@@ -313,7 +349,7 @@ ENDPROC(start_cpu0)
24857 .quad INIT_PER_CPU_VAR(irq_stack_union)
24858
24859 GLOBAL(stack_start)
24860- .quad init_thread_union+THREAD_SIZE-8
24861+ .quad init_thread_union+THREAD_SIZE-16
24862 .word 0
24863 __FINITDATA
24864
24865@@ -391,7 +427,7 @@ ENTRY(early_idt_handler)
24866 call dump_stack
24867 #ifdef CONFIG_KALLSYMS
24868 leaq early_idt_ripmsg(%rip),%rdi
24869- movq 40(%rsp),%rsi # %rip again
24870+ movq 88(%rsp),%rsi # %rip again
24871 call __print_symbol
24872 #endif
24873 #endif /* EARLY_PRINTK */
24874@@ -420,6 +456,7 @@ ENDPROC(early_idt_handler)
24875 early_recursion_flag:
24876 .long 0
24877
24878+ .section .rodata,"a",@progbits
24879 #ifdef CONFIG_EARLY_PRINTK
24880 early_idt_msg:
24881 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24882@@ -447,29 +484,52 @@ NEXT_PAGE(early_level4_pgt)
24883 NEXT_PAGE(early_dynamic_pgts)
24884 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24885
24886- .data
24887+ .section .rodata,"a",@progbits
24888
24889-#ifndef CONFIG_XEN
24890 NEXT_PAGE(init_level4_pgt)
24891- .fill 512,8,0
24892-#else
24893-NEXT_PAGE(init_level4_pgt)
24894- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24895 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24896 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24897+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24898+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24899+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24900+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24901+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24902+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24903 .org init_level4_pgt + L4_START_KERNEL*8, 0
24904 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24905 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24906
24907+#ifdef CONFIG_PAX_PER_CPU_PGD
24908+NEXT_PAGE(cpu_pgd)
24909+ .rept 2*NR_CPUS
24910+ .fill 512,8,0
24911+ .endr
24912+#endif
24913+
24914 NEXT_PAGE(level3_ident_pgt)
24915 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24916+#ifdef CONFIG_XEN
24917 .fill 511, 8, 0
24918+#else
24919+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24920+ .fill 510,8,0
24921+#endif
24922+
24923+NEXT_PAGE(level3_vmalloc_start_pgt)
24924+ .fill 512,8,0
24925+
24926+NEXT_PAGE(level3_vmalloc_end_pgt)
24927+ .fill 512,8,0
24928+
24929+NEXT_PAGE(level3_vmemmap_pgt)
24930+ .fill L3_VMEMMAP_START,8,0
24931+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24932+
24933 NEXT_PAGE(level2_ident_pgt)
24934- /* Since I easily can, map the first 1G.
24935+ /* Since I easily can, map the first 2G.
24936 * Don't set NX because code runs from these pages.
24937 */
24938- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24939-#endif
24940+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24941
24942 NEXT_PAGE(level3_kernel_pgt)
24943 .fill L3_START_KERNEL,8,0
24944@@ -477,6 +537,9 @@ NEXT_PAGE(level3_kernel_pgt)
24945 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24946 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24947
24948+NEXT_PAGE(level2_vmemmap_pgt)
24949+ .fill 512,8,0
24950+
24951 NEXT_PAGE(level2_kernel_pgt)
24952 /*
24953 * 512 MB kernel mapping. We spend a full page on this pagetable
24954@@ -492,23 +555,61 @@ NEXT_PAGE(level2_kernel_pgt)
24955 KERNEL_IMAGE_SIZE/PMD_SIZE)
24956
24957 NEXT_PAGE(level2_fixmap_pgt)
24958- .fill 506,8,0
24959- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24960- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24961- .fill 5,8,0
24962+ .fill 504,8,0
24963+ .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _PAGE_TABLE
24964+ .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _PAGE_TABLE
24965+ .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _PAGE_TABLE
24966+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24967+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24968+ .fill 4,8,0
24969
24970 NEXT_PAGE(level1_fixmap_pgt)
24971+ .fill 3*512,8,0
24972+
24973+NEXT_PAGE(level1_vsyscall_pgt)
24974 .fill 512,8,0
24975
24976 #undef PMDS
24977
24978- .data
24979+ .align PAGE_SIZE
24980+ENTRY(cpu_gdt_table)
24981+ .rept NR_CPUS
24982+ .quad 0x0000000000000000 /* NULL descriptor */
24983+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24984+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24985+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24986+ .quad 0x00cffb000000ffff /* __USER32_CS */
24987+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24988+ .quad 0x00affb000000ffff /* __USER_CS */
24989+
24990+#ifdef CONFIG_PAX_KERNEXEC
24991+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24992+#else
24993+ .quad 0x0 /* unused */
24994+#endif
24995+
24996+ .quad 0,0 /* TSS */
24997+ .quad 0,0 /* LDT */
24998+ .quad 0,0,0 /* three TLS descriptors */
24999+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25000+ /* asm/segment.h:GDT_ENTRIES must match this */
25001+
25002+#ifdef CONFIG_PAX_MEMORY_UDEREF
25003+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25004+#else
25005+ .quad 0x0 /* unused */
25006+#endif
25007+
25008+ /* zero the remaining page */
25009+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25010+ .endr
25011+
25012 .align 16
25013 .globl early_gdt_descr
25014 early_gdt_descr:
25015 .word GDT_ENTRIES*8-1
25016 early_gdt_descr_base:
25017- .quad INIT_PER_CPU_VAR(gdt_page)
25018+ .quad cpu_gdt_table
25019
25020 ENTRY(phys_base)
25021 /* This must match the first entry in level2_kernel_pgt */
25022@@ -532,8 +633,8 @@ NEXT_PAGE(kasan_zero_pud)
25023
25024
25025 #include "../../x86/xen/xen-head.S"
25026-
25027- __PAGE_ALIGNED_BSS
25028+
25029+ .section .rodata,"a",@progbits
25030 NEXT_PAGE(empty_zero_page)
25031 .skip PAGE_SIZE
25032
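The new L4_*/L3_* constants are plain index computations into the top-level paging structures; with 4-level paging each PGD slot covers 2^39 bytes (512 GiB), so L4_VMALLOC_START is simply which of the 512 slots VMALLOC_START falls into. The stock definitions:

    #define PGDIR_SHIFT     39
    #define PTRS_PER_PGD    512
    #define pgd_index(address)  (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

The btsq $_PAGE_BIT_NX instructions seen earlier set bit 63 of the corresponding entries, i.e. entry |= _PAGE_NX, once NX support has been confirmed in CPUID.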
25033diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25034index 05fd74f..c3548b1 100644
25035--- a/arch/x86/kernel/i386_ksyms_32.c
25036+++ b/arch/x86/kernel/i386_ksyms_32.c
25037@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25038 EXPORT_SYMBOL(cmpxchg8b_emu);
25039 #endif
25040
25041+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25042+
25043 /* Networking helper routines. */
25044 EXPORT_SYMBOL(csum_partial_copy_generic);
25045+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25046+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25047
25048 EXPORT_SYMBOL(__get_user_1);
25049 EXPORT_SYMBOL(__get_user_2);
25050@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25051 EXPORT_SYMBOL(___preempt_schedule_context);
25052 #endif
25053 #endif
25054+
25055+#ifdef CONFIG_PAX_KERNEXEC
25056+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25057+#endif
25058+
25059+#ifdef CONFIG_PAX_PER_CPU_PGD
25060+EXPORT_SYMBOL(cpu_pgd);
25061+#endif
25062diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25063index d5651fc..29c740d 100644
25064--- a/arch/x86/kernel/i387.c
25065+++ b/arch/x86/kernel/i387.c
25066@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25067 static inline bool interrupted_user_mode(void)
25068 {
25069 struct pt_regs *regs = get_irq_regs();
25070- return regs && user_mode_vm(regs);
25071+ return regs && user_mode(regs);
25072 }
25073
25074 /*
25075diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25076index e7cc537..67d7372 100644
25077--- a/arch/x86/kernel/i8259.c
25078+++ b/arch/x86/kernel/i8259.c
25079@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25080 static void make_8259A_irq(unsigned int irq)
25081 {
25082 disable_irq_nosync(irq);
25083- io_apic_irqs &= ~(1<<irq);
25084+ io_apic_irqs &= ~(1UL<<irq);
25085 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25086 enable_irq(irq);
25087 }
25088@@ -208,7 +208,7 @@ spurious_8259A_irq:
25089 "spurious 8259A interrupt: IRQ%d.\n", irq);
25090 spurious_irq_mask |= irqmask;
25091 }
25092- atomic_inc(&irq_err_count);
25093+ atomic_inc_unchecked(&irq_err_count);
25094 /*
25095 * Theoretically we do not have to handle this IRQ,
25096 * but in Linux this does not cause problems and is
25097@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25098 /* (slave's support for AEOI in flat mode is to be investigated) */
25099 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25100
25101+ pax_open_kernel();
25102 if (auto_eoi)
25103 /*
25104 * In AEOI mode we just have to mask the interrupt
25105 * when acking.
25106 */
25107- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25108+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25109 else
25110- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25111+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25112+ pax_close_kernel();
25113
25114 udelay(100); /* wait for 8259A to initialize */
25115
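i8259A_chip becomes read-only under this patch's constification, so the one legitimate runtime write to ->irq_mask_ack has to be bracketed with pax_open_kernel()/pax_close_kernel(), which open a short window in which otherwise read-only kernel data can be written. A sketch of one plausible x86 implementation (toggling CR0.WP); the real PaX code differs by arch and config:

    static inline unsigned long pax_open_kernel(void)
    {
            preempt_disable();
            write_cr0(read_cr0() & ~X86_CR0_WP); /* writes bypass RO mappings */
            return 0;
    }

    static inline unsigned long pax_close_kernel(void)
    {
            write_cr0(read_cr0() | X86_CR0_WP);
            preempt_enable();
            return 0;
    }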
25116diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25117index a979b5b..1d6db75 100644
25118--- a/arch/x86/kernel/io_delay.c
25119+++ b/arch/x86/kernel/io_delay.c
25120@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25121 * Quirk table for systems that misbehave (lock up, etc.) if port
25122 * 0x80 is used:
25123 */
25124-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25125+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25126 {
25127 .callback = dmi_io_delay_0xed_port,
25128 .ident = "Compaq Presario V6000",
25129diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25130index 4ddaf66..49d5c18 100644
25131--- a/arch/x86/kernel/ioport.c
25132+++ b/arch/x86/kernel/ioport.c
25133@@ -6,6 +6,7 @@
25134 #include <linux/sched.h>
25135 #include <linux/kernel.h>
25136 #include <linux/capability.h>
25137+#include <linux/security.h>
25138 #include <linux/errno.h>
25139 #include <linux/types.h>
25140 #include <linux/ioport.h>
25141@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25142 return -EINVAL;
25143 if (turn_on && !capable(CAP_SYS_RAWIO))
25144 return -EPERM;
25145+#ifdef CONFIG_GRKERNSEC_IO
25146+ if (turn_on && grsec_disable_privio) {
25147+ gr_handle_ioperm();
25148+ return -ENODEV;
25149+ }
25150+#endif
25151
25152 /*
25153 * If it's the first ioperm() call in this thread's lifetime, set the
25154@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25155 * because the ->io_bitmap_max value must match the bitmap
25156 * contents:
25157 */
25158- tss = &per_cpu(init_tss, get_cpu());
25159+ tss = init_tss + get_cpu();
25160
25161 if (turn_on)
25162 bitmap_clear(t->io_bitmap_ptr, from, num);
25163@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25164 if (level > old) {
25165 if (!capable(CAP_SYS_RAWIO))
25166 return -EPERM;
25167+#ifdef CONFIG_GRKERNSEC_IO
25168+ if (grsec_disable_privio) {
25169+ gr_handle_iopl();
25170+ return -ENODEV;
25171+ }
25172+#endif
25173 }
25174 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25175 t->iopl = level << 12;
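
sys_ioperm() and sys_iopl() gain matching GRKERNSEC_IO gates: when privileged I/O is disabled by policy, even a CAP_SYS_RAWIO caller is refused with -ENODEV after the attempt is logged. A compilable sketch of the shared check, with grsec_disable_privio modelled as a plain flag and the logging hook stubbed out (both stand in for the grsecurity-side definitions):

#include <errno.h>
#include <stdio.h>

static int grsec_disable_privio = 1;  /* stand-in: the real flag is a grsecurity global */

static void gr_handle_ioperm(void)    /* stub for the audit/logging hook */
{
	fprintf(stderr, "denied ioperm()\n");
}

static int ioperm_policy_check(int turn_on)
{
	if (turn_on && grsec_disable_privio) {
		gr_handle_ioperm();
		return -ENODEV;  /* not -EPERM: the facility is reported as absent */
	}
	return 0;
}

Returning -ENODEV rather than -EPERM makes well-behaved callers treat port I/O as unavailable instead of retrying with more privilege.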
25176diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25177index 67b1cbe..6ad4cbc 100644
25178--- a/arch/x86/kernel/irq.c
25179+++ b/arch/x86/kernel/irq.c
25180@@ -22,7 +22,7 @@
25181 #define CREATE_TRACE_POINTS
25182 #include <asm/trace/irq_vectors.h>
25183
25184-atomic_t irq_err_count;
25185+atomic_unchecked_t irq_err_count;
25186
25187 /* Function pointer for generic interrupt vector handling */
25188 void (*x86_platform_ipi_callback)(void) = NULL;
25189@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25190 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25191 seq_puts(p, " Hypervisor callback interrupts\n");
25192 #endif
25193- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25194+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25195 #if defined(CONFIG_X86_IO_APIC)
25196- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25197+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25198 #endif
25199 return 0;
25200 }
25201@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25202
25203 u64 arch_irq_stat(void)
25204 {
25205- u64 sum = atomic_read(&irq_err_count);
25206+ u64 sum = atomic_read_unchecked(&irq_err_count);
25207 return sum;
25208 }
25209
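
irq_err_count is a statistics counter that may legitimately wrap, so it moves from atomic_t to atomic_unchecked_t, the PaX REFCOUNT escape hatch whose operations skip the overflow trap applied to ordinary atomics. A self-contained sketch of the distinction, using __builtin_add_overflow() to model the trap that the real implementation applies in asm:

#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;            /* overflow-checked under REFCOUNT */
typedef struct { volatile int counter; } atomic_unchecked_t;  /* exempt: wraparound tolerated */

static void atomic_inc(atomic_t *v)
{
	int next;

	if (__builtin_add_overflow(v->counter, 1, &next))
		abort();  /* models the REFCOUNT trap on signed overflow */
	v->counter = next;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* well-defined wraparound: exactly what an error counter wants */
	v->counter = (int)((unsigned int)v->counter + 1u);
}

The read side pairs up the same way, which is why the /proc/interrupts and arch_irq_stat() hunks switch to atomic_read_unchecked().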
25210diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25211index 28d28f5..e6cc9ae 100644
25212--- a/arch/x86/kernel/irq_32.c
25213+++ b/arch/x86/kernel/irq_32.c
25214@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25215
25216 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25217
25218+extern void gr_handle_kernel_exploit(void);
25219+
25220 int sysctl_panic_on_stackoverflow __read_mostly;
25221
25222 /* Debugging check for stack overflow: is there less than 1KB free? */
25223@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25224 __asm__ __volatile__("andl %%esp,%0" :
25225 "=r" (sp) : "0" (THREAD_SIZE - 1));
25226
25227- return sp < (sizeof(struct thread_info) + STACK_WARN);
25228+ return sp < STACK_WARN;
25229 }
25230
25231 static void print_stack_overflow(void)
25232 {
25233 printk(KERN_WARNING "low stack detected by irq handler\n");
25234 dump_stack();
25235+ gr_handle_kernel_exploit();
25236 if (sysctl_panic_on_stackoverflow)
25237 panic("low stack detected by irq handler - check messages\n");
25238 }
25239@@ -77,10 +80,9 @@ static inline void *current_stack(void)
25240 static inline int
25241 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25242 {
25243- struct irq_stack *curstk, *irqstk;
25244+ struct irq_stack *irqstk;
25245 u32 *isp, *prev_esp, arg1, arg2;
25246
25247- curstk = (struct irq_stack *) current_stack();
25248 irqstk = __this_cpu_read(hardirq_stack);
25249
25250 /*
25251@@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25252 * handler) we can't do that and just have to keep using the
25253 * current stack (which is the irq stack already after all)
25254 */
25255- if (unlikely(curstk == irqstk))
25256+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25257 return 0;
25258
25259- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25260+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25261
25262 /* Save the next esp at the bottom of the stack */
25263 prev_esp = (u32 *)irqstk;
25264 *prev_esp = current_stack_pointer();
25265
25266+#ifdef CONFIG_PAX_MEMORY_UDEREF
25267+ __set_fs(MAKE_MM_SEG(0));
25268+#endif
25269+
25270 if (unlikely(overflow))
25271 call_on_stack(print_stack_overflow, isp);
25272
25273@@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25274 : "0" (irq), "1" (desc), "2" (isp),
25275 "D" (desc->handle_irq)
25276 : "memory", "cc", "ecx");
25277+
25278+#ifdef CONFIG_PAX_MEMORY_UDEREF
25279+ __set_fs(current_thread_info()->addr_limit);
25280+#endif
25281+
25282 return 1;
25283 }
25284
25285@@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25286 */
25287 void irq_ctx_init(int cpu)
25288 {
25289- struct irq_stack *irqstk;
25290-
25291 if (per_cpu(hardirq_stack, cpu))
25292 return;
25293
25294- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25295- THREADINFO_GFP,
25296- THREAD_SIZE_ORDER));
25297- per_cpu(hardirq_stack, cpu) = irqstk;
25298-
25299- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25300- THREADINFO_GFP,
25301- THREAD_SIZE_ORDER));
25302- per_cpu(softirq_stack, cpu) = irqstk;
25303-
25304- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25305- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25306+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25307+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25308 }
25309
25310 void do_softirq_own_stack(void)
25311 {
25312- struct thread_info *curstk;
25313 struct irq_stack *irqstk;
25314 u32 *isp, *prev_esp;
25315
25316- curstk = current_stack();
25317 irqstk = __this_cpu_read(softirq_stack);
25318
25319 /* build the stack frame on the softirq stack */
25320@@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
25321 prev_esp = (u32 *)irqstk;
25322 *prev_esp = current_stack_pointer();
25323
25324+#ifdef CONFIG_PAX_MEMORY_UDEREF
25325+ __set_fs(MAKE_MM_SEG(0));
25326+#endif
25327+
25328 call_on_stack(__do_softirq, isp);
25329+
25330+#ifdef CONFIG_PAX_MEMORY_UDEREF
25331+ __set_fs(current_thread_info()->addr_limit);
25332+#endif
25333+
25334 }
25335
25336 bool handle_irq(unsigned irq, struct pt_regs *regs)
25337@@ -165,7 +171,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25338 if (unlikely(!desc))
25339 return false;
25340
25341- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25342+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25343 if (unlikely(overflow))
25344 print_stack_overflow();
25345 desc->handle_irq(irq, desc);
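
Two things happen in irq_32.c. First, with grsecurity's thread_info moved off the stack, the old "already on the IRQ stack?" test (pointer equality between the stack-derived thread_info and hardirq_stack) no longer works, so the patch checks whether the stack pointer itself lands inside the IRQ stack. Second, under UDEREF the stack switch is bracketed with __set_fs(MAKE_MM_SEG(0)) so userland is inaccessible while the handler runs, and the caller's limit is restored afterwards. A sketch of the replacement membership test, using the usual unsigned-subtraction range idiom (THREAD_SIZE value illustrative):

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE 8192u  /* illustrative: a two-page i386 stack */

/* true iff sp lies in [irqstk, irqstk + THREAD_SIZE); the subtraction
 * wraps for sp below the base, so one unsigned compare covers both ends */
static bool on_irq_stack(uintptr_t sp, uintptr_t irqstk)
{
	return sp - irqstk < THREAD_SIZE;
}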
25346diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25347index e4b503d..824fce8 100644
25348--- a/arch/x86/kernel/irq_64.c
25349+++ b/arch/x86/kernel/irq_64.c
25350@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25351 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25352 EXPORT_PER_CPU_SYMBOL(irq_regs);
25353
25354+extern void gr_handle_kernel_exploit(void);
25355+
25356 int sysctl_panic_on_stackoverflow;
25357
25358 /*
25359@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25360 u64 estack_top, estack_bottom;
25361 u64 curbase = (u64)task_stack_page(current);
25362
25363- if (user_mode_vm(regs))
25364+ if (user_mode(regs))
25365 return;
25366
25367 if (regs->sp >= curbase + sizeof(struct thread_info) +
25368@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25369 irq_stack_top, irq_stack_bottom,
25370 estack_top, estack_bottom);
25371
25372+ gr_handle_kernel_exploit();
25373+
25374 if (sysctl_panic_on_stackoverflow)
25375 panic("low stack detected by irq handler - check messages\n");
25376 #endif
25377diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25378index 26d5a55..a01160a 100644
25379--- a/arch/x86/kernel/jump_label.c
25380+++ b/arch/x86/kernel/jump_label.c
25381@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25382 * Jump label is enabled for the first time.
25383 * So we expect a default_nop...
25384 */
25385- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25386+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25387 != 0))
25388 bug_at((void *)entry->code, __LINE__);
25389 } else {
25390@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25391 * ...otherwise expect an ideal_nop. Otherwise
25392 * something went horribly wrong.
25393 */
25394- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25395+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25396 != 0))
25397 bug_at((void *)entry->code, __LINE__);
25398 }
25399@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25400 * are converting the default nop to the ideal nop.
25401 */
25402 if (init) {
25403- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25404+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25405 bug_at((void *)entry->code, __LINE__);
25406 } else {
25407 code.jump = 0xe9;
25408 code.offset = entry->target -
25409 (entry->code + JUMP_LABEL_NOP_SIZE);
25410- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25411+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25412 bug_at((void *)entry->code, __LINE__);
25413 }
25414 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
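
Every memcmp() in the jump-label verifier now reads instruction bytes through ktla_ktva(). Under KERNEXEC on i386 the kernel text is executable at one virtual address and readable only through an alias at a fixed displacement; ktla_ktva() and ktva_ktla() convert between the two, and both collapse to the identity when KERNEXEC is off. A sketch with an assumed constant displacement (the real offset is arch- and config-specific):

#include <stdint.h>

#define TEXT_ALIAS_DELTA 0x01000000UL  /* assumed value, for illustration only */

/* executable address -> readable/writable alias */
static inline uintptr_t ktla_ktva(uintptr_t addr) { return addr + TEXT_ALIAS_DELTA; }

/* readable/writable alias -> executable address */
static inline uintptr_t ktva_ktla(uintptr_t addr) { return addr - TEXT_ALIAS_DELTA; }

Note that bug_at() is still handed entry->code, so diagnostics keep reporting the address that actually executes.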
25415diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25416index 25ecd56..e12482f 100644
25417--- a/arch/x86/kernel/kgdb.c
25418+++ b/arch/x86/kernel/kgdb.c
25419@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25420 #ifdef CONFIG_X86_32
25421 switch (regno) {
25422 case GDB_SS:
25423- if (!user_mode_vm(regs))
25424+ if (!user_mode(regs))
25425 *(unsigned long *)mem = __KERNEL_DS;
25426 break;
25427 case GDB_SP:
25428- if (!user_mode_vm(regs))
25429+ if (!user_mode(regs))
25430 *(unsigned long *)mem = kernel_stack_pointer(regs);
25431 break;
25432 case GDB_GS:
25433@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25434 bp->attr.bp_addr = breakinfo[breakno].addr;
25435 bp->attr.bp_len = breakinfo[breakno].len;
25436 bp->attr.bp_type = breakinfo[breakno].type;
25437- info->address = breakinfo[breakno].addr;
25438+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25439+ info->address = ktla_ktva(breakinfo[breakno].addr);
25440+ else
25441+ info->address = breakinfo[breakno].addr;
25442 info->len = breakinfo[breakno].len;
25443 info->type = breakinfo[breakno].type;
25444 val = arch_install_hw_breakpoint(bp);
25445@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25446 case 'k':
25447 /* clear the trace bit */
25448 linux_regs->flags &= ~X86_EFLAGS_TF;
25449- atomic_set(&kgdb_cpu_doing_single_step, -1);
25450+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25451
25452 /* set the trace bit if we're stepping */
25453 if (remcomInBuffer[0] == 's') {
25454 linux_regs->flags |= X86_EFLAGS_TF;
25455- atomic_set(&kgdb_cpu_doing_single_step,
25456+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25457 raw_smp_processor_id());
25458 }
25459
25460@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25461
25462 switch (cmd) {
25463 case DIE_DEBUG:
25464- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25465+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25466 if (user_mode(regs))
25467 return single_step_cont(regs, args);
25468 break;
25469@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25470 #endif /* CONFIG_DEBUG_RODATA */
25471
25472 bpt->type = BP_BREAKPOINT;
25473- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25474+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25475 BREAK_INSTR_SIZE);
25476 if (err)
25477 return err;
25478- err = probe_kernel_write((char *)bpt->bpt_addr,
25479+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25480 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25481 #ifdef CONFIG_DEBUG_RODATA
25482 if (!err)
25483@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25484 return -EBUSY;
25485 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25486 BREAK_INSTR_SIZE);
25487- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25488+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25489 if (err)
25490 return err;
25491 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25492@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25493 if (mutex_is_locked(&text_mutex))
25494 goto knl_write;
25495 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25496- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25497+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25498 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25499 goto knl_write;
25500 return err;
25501 knl_write:
25502 #endif /* CONFIG_DEBUG_RODATA */
25503- return probe_kernel_write((char *)bpt->bpt_addr,
25504+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25505 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25506 }
25507
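
kgdb gets the same translation in two places: software breakpoints are saved and planted through the readable alias (probe_kernel_read()/probe_kernel_write() on ktla_ktva(bpt_addr)), and kgdb_correct_hw_break() translates only execution breakpoints, since data watchpoints refer to addresses as loads and stores see them. A sketch of that case split, reusing the assumed displacement from the previous sketch:

#include <stdint.h>

enum { X86_BREAKPOINT_EXECUTE, X86_BREAKPOINT_WRITE };  /* encodings illustrative */

#define TEXT_ALIAS_DELTA 0x01000000UL
#define ktla_ktva(a)     ((a) + TEXT_ALIAS_DELTA)

/* only execution breakpoints target relocated kernel text; data
 * watchpoints keep the address the debugger supplied */
static uintptr_t hw_break_target(int type, uintptr_t addr)
{
	return type == X86_BREAKPOINT_EXECUTE ? ktla_ktva(addr) : addr;
}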
25508diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25509index 4e3d5a9..03fffd8 100644
25510--- a/arch/x86/kernel/kprobes/core.c
25511+++ b/arch/x86/kernel/kprobes/core.c
25512@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25513 s32 raddr;
25514 } __packed *insn;
25515
25516- insn = (struct __arch_relative_insn *)from;
25517+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25518+
25519+ pax_open_kernel();
25520 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25521 insn->op = op;
25522+ pax_close_kernel();
25523 }
25524
25525 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25526@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25527 kprobe_opcode_t opcode;
25528 kprobe_opcode_t *orig_opcodes = opcodes;
25529
25530- if (search_exception_tables((unsigned long)opcodes))
25531+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25532 return 0; /* Page fault may occur on this address. */
25533
25534 retry:
25535@@ -260,12 +263,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25536 * Fortunately, we know that the original code is the ideal 5-byte
25537 * long NOP.
25538 */
25539- memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25540+ memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25541 if (faddr)
25542 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
25543 else
25544 buf[0] = kp->opcode;
25545- return (unsigned long)buf;
25546+ return ktva_ktla((unsigned long)buf);
25547 }
25548
25549 /*
25550@@ -364,7 +367,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25551 /* Another subsystem puts a breakpoint, failed to recover */
25552 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25553 return 0;
25554+ pax_open_kernel();
25555 memcpy(dest, insn.kaddr, insn.length);
25556+ pax_close_kernel();
25557
25558 #ifdef CONFIG_X86_64
25559 if (insn_rip_relative(&insn)) {
25560@@ -391,7 +396,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25561 return 0;
25562 }
25563 disp = (u8 *) dest + insn_offset_displacement(&insn);
25564+ pax_open_kernel();
25565 *(s32 *) disp = (s32) newdisp;
25566+ pax_close_kernel();
25567 }
25568 #endif
25569 return insn.length;
25570@@ -533,7 +540,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25571 * nor set current_kprobe, because it doesn't use single
25572 * stepping.
25573 */
25574- regs->ip = (unsigned long)p->ainsn.insn;
25575+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25576 preempt_enable_no_resched();
25577 return;
25578 }
25579@@ -550,9 +557,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25580 regs->flags &= ~X86_EFLAGS_IF;
25581 /* single step inline if the instruction is an int3 */
25582 if (p->opcode == BREAKPOINT_INSTRUCTION)
25583- regs->ip = (unsigned long)p->addr;
25584+ regs->ip = ktla_ktva((unsigned long)p->addr);
25585 else
25586- regs->ip = (unsigned long)p->ainsn.insn;
25587+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25588 }
25589 NOKPROBE_SYMBOL(setup_singlestep);
25590
25591@@ -602,7 +609,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25592 struct kprobe *p;
25593 struct kprobe_ctlblk *kcb;
25594
25595- if (user_mode_vm(regs))
25596+ if (user_mode(regs))
25597 return 0;
25598
25599 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25600@@ -637,7 +644,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25601 setup_singlestep(p, regs, kcb, 0);
25602 return 1;
25603 }
25604- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25605+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25606 /*
25607 * The breakpoint instruction was removed right
25608 * after we hit it. Another cpu has removed
25609@@ -684,6 +691,9 @@ static void __used kretprobe_trampoline_holder(void)
25610 " movq %rax, 152(%rsp)\n"
25611 RESTORE_REGS_STRING
25612 " popfq\n"
25613+#ifdef KERNEXEC_PLUGIN
25614+ " btsq $63,(%rsp)\n"
25615+#endif
25616 #else
25617 " pushf\n"
25618 SAVE_REGS_STRING
25619@@ -824,7 +834,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25620 struct kprobe_ctlblk *kcb)
25621 {
25622 unsigned long *tos = stack_addr(regs);
25623- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25624+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25625 unsigned long orig_ip = (unsigned long)p->addr;
25626 kprobe_opcode_t *insn = p->ainsn.insn;
25627
25628@@ -1007,7 +1017,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25629 struct die_args *args = data;
25630 int ret = NOTIFY_DONE;
25631
25632- if (args->regs && user_mode_vm(args->regs))
25633+ if (args->regs && user_mode(args->regs))
25634 return ret;
25635
25636 if (val == DIE_GPF) {
25637diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25638index 7b3b9d1..e2478b91 100644
25639--- a/arch/x86/kernel/kprobes/opt.c
25640+++ b/arch/x86/kernel/kprobes/opt.c
25641@@ -79,6 +79,7 @@ found:
25642 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25643 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25644 {
25645+ pax_open_kernel();
25646 #ifdef CONFIG_X86_64
25647 *addr++ = 0x48;
25648 *addr++ = 0xbf;
25649@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25650 *addr++ = 0xb8;
25651 #endif
25652 *(unsigned long *)addr = val;
25653+ pax_close_kernel();
25654 }
25655
25656 asm (
25657@@ -342,7 +344,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25658 * Verify if the address gap is in 2GB range, because this uses
25659 * a relative jump.
25660 */
25661- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25662+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25663 if (abs(rel) > 0x7fffffff) {
25664 __arch_remove_optimized_kprobe(op, 0);
25665 return -ERANGE;
25666@@ -359,16 +361,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25667 op->optinsn.size = ret;
25668
25669 /* Copy arch-dep-instance from template */
25670- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25671+ pax_open_kernel();
25672+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25673+ pax_close_kernel();
25674
25675 /* Set probe information */
25676 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25677
25678 /* Set probe function call */
25679- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25680+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25681
25682 /* Set returning jmp instruction at the tail of out-of-line buffer */
25683- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25684+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25685 (u8 *)op->kp.addr + op->optinsn.size);
25686
25687 flush_icache_range((unsigned long) buf,
25688@@ -393,7 +397,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25689 WARN_ON(kprobe_disabled(&op->kp));
25690
25691 /* Backup instructions which will be replaced by jump address */
25692- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25693+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25694 RELATIVE_ADDR_SIZE);
25695
25696 insn_buf[0] = RELATIVEJUMP_OPCODE;
25697@@ -441,7 +445,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25698 /* This kprobe is really able to run optimized path. */
25699 op = container_of(p, struct optimized_kprobe, kp);
25700 /* Detour through copied instructions */
25701- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25702+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25703 if (!reenter)
25704 reset_current_kprobe();
25705 preempt_enable_no_resched();
25706diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25707index c2bedae..25e7ab60 100644
25708--- a/arch/x86/kernel/ksysfs.c
25709+++ b/arch/x86/kernel/ksysfs.c
25710@@ -184,7 +184,7 @@ out:
25711
25712 static struct kobj_attribute type_attr = __ATTR_RO(type);
25713
25714-static struct bin_attribute data_attr = {
25715+static bin_attribute_no_const data_attr __read_only = {
25716 .attr = {
25717 .name = "data",
25718 .mode = S_IRUGO,
25719diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25720index c37886d..d851d32 100644
25721--- a/arch/x86/kernel/ldt.c
25722+++ b/arch/x86/kernel/ldt.c
25723@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25724 if (reload) {
25725 #ifdef CONFIG_SMP
25726 preempt_disable();
25727- load_LDT(pc);
25728+ load_LDT_nolock(pc);
25729 if (!cpumask_equal(mm_cpumask(current->mm),
25730 cpumask_of(smp_processor_id())))
25731 smp_call_function(flush_ldt, current->mm, 1);
25732 preempt_enable();
25733 #else
25734- load_LDT(pc);
25735+ load_LDT_nolock(pc);
25736 #endif
25737 }
25738 if (oldsize) {
25739@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25740 return err;
25741
25742 for (i = 0; i < old->size; i++)
25743- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25744+ write_ldt_entry(new->ldt, i, old->ldt + i);
25745 return 0;
25746 }
25747
25748@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25749 retval = copy_ldt(&mm->context, &old_mm->context);
25750 mutex_unlock(&old_mm->context.lock);
25751 }
25752+
25753+ if (tsk == current) {
25754+ mm->context.vdso = 0;
25755+
25756+#ifdef CONFIG_X86_32
25757+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25758+ mm->context.user_cs_base = 0UL;
25759+ mm->context.user_cs_limit = ~0UL;
25760+
25761+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25762+ cpus_clear(mm->context.cpu_user_cs_mask);
25763+#endif
25764+
25765+#endif
25766+#endif
25767+
25768+ }
25769+
25770 return retval;
25771 }
25772
25773@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25774 }
25775 }
25776
25777+#ifdef CONFIG_PAX_SEGMEXEC
25778+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25779+ error = -EINVAL;
25780+ goto out_unlock;
25781+ }
25782+#endif
25783+
25784 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25785 error = -EINVAL;
25786 goto out_unlock;
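
write_ldt() gains a SEGMEXEC check: segmentation-based NX emulation splits the address space into a data half and a code half, and letting a task install its own code-segment descriptors in the LDT would sidestep that split, so such requests now fail with -EINVAL. A sketch of the added predicate; the flag and contents values here are placeholders, not the kernel's actual encodings:

#include <errno.h>

#define MF_PAX_SEGMEXEC           0x4u  /* placeholder bit in the task's PaX flags */
#define MODIFY_LDT_CONTENTS_CODE  0x2u  /* placeholder for the 'code' contents value */

static int segmexec_ldt_check(unsigned int pax_flags, unsigned int contents)
{
	if ((pax_flags & MF_PAX_SEGMEXEC) && (contents & MODIFY_LDT_CONTENTS_CODE))
		return -EINVAL;
	return 0;
}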
25787diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
25788index ff3c3101d..d7c0cd8 100644
25789--- a/arch/x86/kernel/livepatch.c
25790+++ b/arch/x86/kernel/livepatch.c
25791@@ -41,9 +41,10 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25792 int ret, numpages, size = 4;
25793 bool readonly;
25794 unsigned long val;
25795- unsigned long core = (unsigned long)mod->module_core;
25796- unsigned long core_ro_size = mod->core_ro_size;
25797- unsigned long core_size = mod->core_size;
25798+ unsigned long core_rx = (unsigned long)mod->module_core_rx;
25799+ unsigned long core_rw = (unsigned long)mod->module_core_rw;
25800+ unsigned long core_size_rx = mod->core_size_rx;
25801+ unsigned long core_size_rw = mod->core_size_rw;
25802
25803 switch (type) {
25804 case R_X86_64_NONE:
25805@@ -66,11 +67,12 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25806 return -EINVAL;
25807 }
25808
25809- if (loc < core || loc >= core + core_size)
25810+ if ((loc < core_rx || loc >= core_rx + core_size_rx) &&
25811+ (loc < core_rw || loc >= core_rw + core_size_rw))
25812 /* loc does not point to any symbol inside the module */
25813 return -EINVAL;
25814
25815- if (loc < core + core_ro_size)
25816+ if (loc < core_rx + core_size_rx)
25817 readonly = true;
25818 else
25819 readonly = false;
25820diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25821index 469b23d..5449cfe 100644
25822--- a/arch/x86/kernel/machine_kexec_32.c
25823+++ b/arch/x86/kernel/machine_kexec_32.c
25824@@ -26,7 +26,7 @@
25825 #include <asm/cacheflush.h>
25826 #include <asm/debugreg.h>
25827
25828-static void set_idt(void *newidt, __u16 limit)
25829+static void set_idt(struct desc_struct *newidt, __u16 limit)
25830 {
25831 struct desc_ptr curidt;
25832
25833@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25834 }
25835
25836
25837-static void set_gdt(void *newgdt, __u16 limit)
25838+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25839 {
25840 struct desc_ptr curgdt;
25841
25842@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25843 }
25844
25845 control_page = page_address(image->control_code_page);
25846- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25847+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25848
25849 relocate_kernel_ptr = control_page;
25850 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25851diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25852index 94ea120..4154cea 100644
25853--- a/arch/x86/kernel/mcount_64.S
25854+++ b/arch/x86/kernel/mcount_64.S
25855@@ -7,7 +7,7 @@
25856 #include <linux/linkage.h>
25857 #include <asm/ptrace.h>
25858 #include <asm/ftrace.h>
25859-
25860+#include <asm/alternative-asm.h>
25861
25862 .code64
25863 .section .entry.text, "ax"
25864@@ -148,8 +148,9 @@
25865 #ifdef CONFIG_DYNAMIC_FTRACE
25866
25867 ENTRY(function_hook)
25868+ pax_force_retaddr
25869 retq
25870-END(function_hook)
25871+ENDPROC(function_hook)
25872
25873 ENTRY(ftrace_caller)
25874 /* save_mcount_regs fills in first two parameters */
25875@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25876 #endif
25877
25878 GLOBAL(ftrace_stub)
25879+ pax_force_retaddr
25880 retq
25881-END(ftrace_caller)
25882+ENDPROC(ftrace_caller)
25883
25884 ENTRY(ftrace_regs_caller)
25885 /* Save the current flags before any operations that can change them */
25886@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25887
25888 jmp ftrace_return
25889
25890-END(ftrace_regs_caller)
25891+ENDPROC(ftrace_regs_caller)
25892
25893
25894 #else /* ! CONFIG_DYNAMIC_FTRACE */
25895@@ -272,18 +274,20 @@ fgraph_trace:
25896 #endif
25897
25898 GLOBAL(ftrace_stub)
25899+ pax_force_retaddr
25900 retq
25901
25902 trace:
25903 /* save_mcount_regs fills in first two parameters */
25904 save_mcount_regs
25905
25906+ pax_force_fptr ftrace_trace_function
25907 call *ftrace_trace_function
25908
25909 restore_mcount_regs
25910
25911 jmp fgraph_trace
25912-END(function_hook)
25913+ENDPROC(function_hook)
25914 #endif /* CONFIG_DYNAMIC_FTRACE */
25915 #endif /* CONFIG_FUNCTION_TRACER */
25916
25917@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25918
25919 restore_mcount_regs
25920
25921+ pax_force_retaddr
25922 retq
25923-END(ftrace_graph_caller)
25924+ENDPROC(ftrace_graph_caller)
25925
25926 GLOBAL(return_to_handler)
25927 subq $24, %rsp
25928@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25929 movq 8(%rsp), %rdx
25930 movq (%rsp), %rax
25931 addq $24, %rsp
25932+ pax_force_fptr %rdi
25933 jmp *%rdi
25934+ENDPROC(return_to_handler)
25935 #endif
25936diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25937index d1ac80b..f593701 100644
25938--- a/arch/x86/kernel/module.c
25939+++ b/arch/x86/kernel/module.c
25940@@ -82,17 +82,17 @@ static unsigned long int get_module_load_offset(void)
25941 }
25942 #endif
25943
25944-void *module_alloc(unsigned long size)
25945+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25946 {
25947 void *p;
25948
25949- if (PAGE_ALIGN(size) > MODULES_LEN)
25950+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25951 return NULL;
25952
25953 p = __vmalloc_node_range(size, MODULE_ALIGN,
25954 MODULES_VADDR + get_module_load_offset(),
25955- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25956- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
25957+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25958+ prot, 0, NUMA_NO_NODE,
25959 __builtin_return_address(0));
25960 if (p && (kasan_module_alloc(p, size) < 0)) {
25961 vfree(p);
25962@@ -102,6 +102,51 @@ void *module_alloc(unsigned long size)
25963 return p;
25964 }
25965
25966+void *module_alloc(unsigned long size)
25967+{
25968+
25969+#ifdef CONFIG_PAX_KERNEXEC
25970+ return __module_alloc(size, PAGE_KERNEL);
25971+#else
25972+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25973+#endif
25974+
25975+}
25976+
25977+#ifdef CONFIG_PAX_KERNEXEC
25978+#ifdef CONFIG_X86_32
25979+void *module_alloc_exec(unsigned long size)
25980+{
25981+ struct vm_struct *area;
25982+
25983+ if (size == 0)
25984+ return NULL;
25985+
25986+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25987+	return area ? area->addr : NULL;
25988+}
25989+EXPORT_SYMBOL(module_alloc_exec);
25990+
25991+void module_memfree_exec(void *module_region)
25992+{
25993+ vunmap(module_region);
25994+}
25995+EXPORT_SYMBOL(module_memfree_exec);
25996+#else
25997+void module_memfree_exec(void *module_region)
25998+{
25999+ module_memfree(module_region);
26000+}
26001+EXPORT_SYMBOL(module_memfree_exec);
26002+
26003+void *module_alloc_exec(unsigned long size)
26004+{
26005+ return __module_alloc(size, PAGE_KERNEL_RX);
26006+}
26007+EXPORT_SYMBOL(module_alloc_exec);
26008+#endif
26009+#endif
26010+
26011 #ifdef CONFIG_X86_32
26012 int apply_relocate(Elf32_Shdr *sechdrs,
26013 const char *strtab,
26014@@ -112,14 +157,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26015 unsigned int i;
26016 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26017 Elf32_Sym *sym;
26018- uint32_t *location;
26019+ uint32_t *plocation, location;
26020
26021 DEBUGP("Applying relocate section %u to %u\n",
26022 relsec, sechdrs[relsec].sh_info);
26023 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26024 /* This is where to make the change */
26025- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26026- + rel[i].r_offset;
26027+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26028+ location = (uint32_t)plocation;
26029+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26030+ plocation = ktla_ktva((void *)plocation);
26031 /* This is the symbol it is referring to. Note that all
26032 undefined symbols have been resolved. */
26033 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26034@@ -128,11 +175,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26035 switch (ELF32_R_TYPE(rel[i].r_info)) {
26036 case R_386_32:
26037 /* We add the value into the location given */
26038- *location += sym->st_value;
26039+ pax_open_kernel();
26040+ *plocation += sym->st_value;
26041+ pax_close_kernel();
26042 break;
26043 case R_386_PC32:
26044 /* Add the value, subtract its position */
26045- *location += sym->st_value - (uint32_t)location;
26046+ pax_open_kernel();
26047+ *plocation += sym->st_value - location;
26048+ pax_close_kernel();
26049 break;
26050 default:
26051 pr_err("%s: Unknown relocation: %u\n",
26052@@ -177,21 +228,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26053 case R_X86_64_NONE:
26054 break;
26055 case R_X86_64_64:
26056+ pax_open_kernel();
26057 *(u64 *)loc = val;
26058+ pax_close_kernel();
26059 break;
26060 case R_X86_64_32:
26061+ pax_open_kernel();
26062 *(u32 *)loc = val;
26063+ pax_close_kernel();
26064 if (val != *(u32 *)loc)
26065 goto overflow;
26066 break;
26067 case R_X86_64_32S:
26068+ pax_open_kernel();
26069 *(s32 *)loc = val;
26070+ pax_close_kernel();
26071 if ((s64)val != *(s32 *)loc)
26072 goto overflow;
26073 break;
26074 case R_X86_64_PC32:
26075 val -= (u64)loc;
26076+ pax_open_kernel();
26077 *(u32 *)loc = val;
26078+ pax_close_kernel();
26079+
26080 #if 0
26081 if ((s64)val != *(s32 *)loc)
26082 goto overflow;
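
module_alloc() is refactored into __module_alloc(size, prot) so ordinary module memory can be mapped non-executable under KERNEXEC, while module_alloc_exec() hands out read-only executable memory for text (the livepatch hunk above adjusts its bounds checks to the same RX/RW split). A user-space sketch of the protection-parameterised allocator, with mmap() standing in for __vmalloc_node_range():

#include <stddef.h>
#include <sys/mman.h>

static void *module_alloc_prot(size_t size, int prot)
{
	void *p;

	if (size == 0)  /* mirrors the added !size guard */
		return NULL;
	p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

/* usage: text gets RX, data gets RW; no mapping is ever W and X at once */
/* void *text = module_alloc_prot(len, PROT_READ | PROT_EXEC);  */
/* void *data = module_alloc_prot(len, PROT_READ | PROT_WRITE); */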
26083diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26084index 113e707..0a690e1 100644
26085--- a/arch/x86/kernel/msr.c
26086+++ b/arch/x86/kernel/msr.c
26087@@ -39,6 +39,7 @@
26088 #include <linux/notifier.h>
26089 #include <linux/uaccess.h>
26090 #include <linux/gfp.h>
26091+#include <linux/grsecurity.h>
26092
26093 #include <asm/processor.h>
26094 #include <asm/msr.h>
26095@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26096 int err = 0;
26097 ssize_t bytes = 0;
26098
26099+#ifdef CONFIG_GRKERNSEC_KMEM
26100+ gr_handle_msr_write();
26101+ return -EPERM;
26102+#endif
26103+
26104 if (count % 8)
26105 return -EINVAL; /* Invalid chunk size */
26106
26107@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26108 err = -EBADF;
26109 break;
26110 }
26111+#ifdef CONFIG_GRKERNSEC_KMEM
26112+ gr_handle_msr_write();
26113+ return -EPERM;
26114+#endif
26115 if (copy_from_user(&regs, uregs, sizeof regs)) {
26116 err = -EFAULT;
26117 break;
26118@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26119 return notifier_from_errno(err);
26120 }
26121
26122-static struct notifier_block __refdata msr_class_cpu_notifier = {
26123+static struct notifier_block msr_class_cpu_notifier = {
26124 .notifier_call = msr_class_cpu_callback,
26125 };
26126
26127diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26128index c3e985d..110a36a 100644
26129--- a/arch/x86/kernel/nmi.c
26130+++ b/arch/x86/kernel/nmi.c
26131@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26132
26133 static void nmi_max_handler(struct irq_work *w)
26134 {
26135- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26136+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26137 int remainder_ns, decimal_msecs;
26138- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26139+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26140
26141 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26142 decimal_msecs = remainder_ns / 1000;
26143
26144 printk_ratelimited(KERN_INFO
26145 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26146- a->handler, whole_msecs, decimal_msecs);
26147+ n->action->handler, whole_msecs, decimal_msecs);
26148 }
26149
26150 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26151@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26152 delta = sched_clock() - delta;
26153 trace_nmi_handler(a->handler, (int)delta, thishandled);
26154
26155- if (delta < nmi_longest_ns || delta < a->max_duration)
26156+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26157 continue;
26158
26159- a->max_duration = delta;
26160- irq_work_queue(&a->irq_work);
26161+ a->work->max_duration = delta;
26162+ irq_work_queue(&a->work->irq_work);
26163 }
26164
26165 rcu_read_unlock();
26166@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26167 }
26168 NOKPROBE_SYMBOL(nmi_handle);
26169
26170-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26171+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26172 {
26173 struct nmi_desc *desc = nmi_to_desc(type);
26174 unsigned long flags;
26175@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26176 if (!action->handler)
26177 return -EINVAL;
26178
26179- init_irq_work(&action->irq_work, nmi_max_handler);
26180+ action->work->action = action;
26181+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26182
26183 spin_lock_irqsave(&desc->lock, flags);
26184
26185@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26186 * event confuses some handlers (kdump uses this flag)
26187 */
26188 if (action->flags & NMI_FLAG_FIRST)
26189- list_add_rcu(&action->list, &desc->head);
26190+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26191 else
26192- list_add_tail_rcu(&action->list, &desc->head);
26193+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26194
26195 spin_unlock_irqrestore(&desc->lock, flags);
26196 return 0;
26197@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26198 if (!strcmp(n->name, name)) {
26199 WARN(in_nmi(),
26200 "Trying to free NMI (%s) from NMI context!\n", n->name);
26201- list_del_rcu(&n->list);
26202+ pax_list_del_rcu((struct list_head *)&n->list);
26203 break;
26204 }
26205 }
26206@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26207 dotraplinkage notrace void
26208 do_nmi(struct pt_regs *regs, long error_code)
26209 {
26210+
26211+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26212+ if (!user_mode(regs)) {
26213+ unsigned long cs = regs->cs & 0xFFFF;
26214+ unsigned long ip = ktva_ktla(regs->ip);
26215+
26216+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26217+ regs->ip = ip;
26218+ }
26219+#endif
26220+
26221 nmi_nesting_preprocess(regs);
26222
26223 nmi_enter();
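
The nmi.c churn exists so that struct nmiaction registrations can live in read-only memory: the two fields the NMI path mutates (the max_duration high-water mark and its irq_work) are hoisted into a companion struct nmiwork that the now-const action merely points at, and list insertion goes through the pax_list_* helpers that open the kernel for the write. A structural sketch of the split; field types are abbreviated and nmi_handler_t is an assumed typedef:

struct nmiwork;

typedef int (*nmi_handler_t)(unsigned int type, void *regs);

struct nmiaction {                        /* can now be const / read-only */
	nmi_handler_t handler;
	const char *name;
	struct nmiwork *work;             /* mutable side-car */
};

struct nmiwork {
	const struct nmiaction *action;   /* back-pointer for nmi_max_handler() */
	unsigned long long max_duration;  /* the real layout also embeds the irq_work */
};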
26224diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26225index 6d9582e..f746287 100644
26226--- a/arch/x86/kernel/nmi_selftest.c
26227+++ b/arch/x86/kernel/nmi_selftest.c
26228@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26229 {
26230 /* trap all the unknown NMIs we may generate */
26231 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26232- __initdata);
26233+ __initconst);
26234 }
26235
26236 static void __init cleanup_nmi_testsuite(void)
26237@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26238 unsigned long timeout;
26239
26240 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26241- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26242+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26243 nmi_fail = FAILURE;
26244 return;
26245 }
26246diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26247index bbb6c73..24a58ef 100644
26248--- a/arch/x86/kernel/paravirt-spinlocks.c
26249+++ b/arch/x86/kernel/paravirt-spinlocks.c
26250@@ -8,7 +8,7 @@
26251
26252 #include <asm/paravirt.h>
26253
26254-struct pv_lock_ops pv_lock_ops = {
26255+struct pv_lock_ops pv_lock_ops __read_only = {
26256 #ifdef CONFIG_SMP
26257 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26258 .unlock_kick = paravirt_nop,
26259diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26260index 548d25f..f8fb99c 100644
26261--- a/arch/x86/kernel/paravirt.c
26262+++ b/arch/x86/kernel/paravirt.c
26263@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26264 {
26265 return x;
26266 }
26267+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26268+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26269+#endif
26270
26271 void __init default_banner(void)
26272 {
26273@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26274
26275 if (opfunc == NULL)
26276 /* If there's no function, patch it with a ud2a (BUG) */
26277- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26278- else if (opfunc == _paravirt_nop)
26279+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26280+ else if (opfunc == (void *)_paravirt_nop)
26281 /* If the operation is a nop, then nop the callsite */
26282 ret = paravirt_patch_nop();
26283
26284 /* identity functions just return their single argument */
26285- else if (opfunc == _paravirt_ident_32)
26286+ else if (opfunc == (void *)_paravirt_ident_32)
26287 ret = paravirt_patch_ident_32(insnbuf, len);
26288- else if (opfunc == _paravirt_ident_64)
26289+ else if (opfunc == (void *)_paravirt_ident_64)
26290 ret = paravirt_patch_ident_64(insnbuf, len);
26291+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26292+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26293+ ret = paravirt_patch_ident_64(insnbuf, len);
26294+#endif
26295
26296 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26297 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26298@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26299 if (insn_len > len || start == NULL)
26300 insn_len = len;
26301 else
26302- memcpy(insnbuf, start, insn_len);
26303+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26304
26305 return insn_len;
26306 }
26307@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26308 return this_cpu_read(paravirt_lazy_mode);
26309 }
26310
26311-struct pv_info pv_info = {
26312+struct pv_info pv_info __read_only = {
26313 .name = "bare hardware",
26314 .paravirt_enabled = 0,
26315 .kernel_rpl = 0,
26316@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26317 #endif
26318 };
26319
26320-struct pv_init_ops pv_init_ops = {
26321+struct pv_init_ops pv_init_ops __read_only = {
26322 .patch = native_patch,
26323 };
26324
26325-struct pv_time_ops pv_time_ops = {
26326+struct pv_time_ops pv_time_ops __read_only = {
26327 .sched_clock = native_sched_clock,
26328 .steal_clock = native_steal_clock,
26329 };
26330
26331-__visible struct pv_irq_ops pv_irq_ops = {
26332+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26333 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26334 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26335 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26336@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26337 #endif
26338 };
26339
26340-__visible struct pv_cpu_ops pv_cpu_ops = {
26341+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26342 .cpuid = native_cpuid,
26343 .get_debugreg = native_get_debugreg,
26344 .set_debugreg = native_set_debugreg,
26345@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26346 NOKPROBE_SYMBOL(native_set_debugreg);
26347 NOKPROBE_SYMBOL(native_load_idt);
26348
26349-struct pv_apic_ops pv_apic_ops = {
25350+struct pv_apic_ops pv_apic_ops __read_only = {
26351 #ifdef CONFIG_X86_LOCAL_APIC
26352 .startup_ipi_hook = paravirt_nop,
26353 #endif
26354 };
26355
26356-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26357+#ifdef CONFIG_X86_32
26358+#ifdef CONFIG_X86_PAE
26359+/* 64-bit pagetable entries */
26360+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26361+#else
26362 /* 32-bit pagetable entries */
26363 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26364+#endif
26365 #else
26366 /* 64-bit pagetable entries */
26367 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26368 #endif
26369
26370-struct pv_mmu_ops pv_mmu_ops = {
26371+struct pv_mmu_ops pv_mmu_ops __read_only = {
26372
26373 .read_cr2 = native_read_cr2,
26374 .write_cr2 = native_write_cr2,
26375@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26376 .make_pud = PTE_IDENT,
26377
26378 .set_pgd = native_set_pgd,
26379+ .set_pgd_batched = native_set_pgd_batched,
26380 #endif
26381 #endif /* PAGETABLE_LEVELS >= 3 */
26382
26383@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26384 },
26385
26386 .set_fixmap = native_set_fixmap,
26387+
26388+#ifdef CONFIG_PAX_KERNEXEC
26389+ .pax_open_kernel = native_pax_open_kernel,
26390+ .pax_close_kernel = native_pax_close_kernel,
26391+#endif
26392+
26393 };
26394
26395 EXPORT_SYMBOL_GPL(pv_time_ops);
26396diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26397index a1da673..b6f5831 100644
26398--- a/arch/x86/kernel/paravirt_patch_64.c
26399+++ b/arch/x86/kernel/paravirt_patch_64.c
26400@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26401 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26402 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26403 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26404+
26405+#ifndef CONFIG_PAX_MEMORY_UDEREF
26406 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26407+#endif
26408+
26409 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26410 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26411
26412@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26413 PATCH_SITE(pv_mmu_ops, read_cr3);
26414 PATCH_SITE(pv_mmu_ops, write_cr3);
26415 PATCH_SITE(pv_cpu_ops, clts);
26416+
26417+#ifndef CONFIG_PAX_MEMORY_UDEREF
26418 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26419+#endif
26420+
26421 PATCH_SITE(pv_cpu_ops, wbinvd);
26422
26423 patch_site:
26424diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26425index 0497f71..7186c0d 100644
26426--- a/arch/x86/kernel/pci-calgary_64.c
26427+++ b/arch/x86/kernel/pci-calgary_64.c
26428@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26429 tce_space = be64_to_cpu(readq(target));
26430 tce_space = tce_space & TAR_SW_BITS;
26431
26432- tce_space = tce_space & (~specified_table_size);
26433+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26434 info->tce_space = (u64 *)__va(tce_space);
26435 }
26436 }
26437diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26438index 35ccf75..7a15747 100644
26439--- a/arch/x86/kernel/pci-iommu_table.c
26440+++ b/arch/x86/kernel/pci-iommu_table.c
26441@@ -2,7 +2,7 @@
26442 #include <asm/iommu_table.h>
26443 #include <linux/string.h>
26444 #include <linux/kallsyms.h>
26445-
26446+#include <linux/sched.h>
26447
26448 #define DEBUG 1
26449
26450diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26451index 77dd0ad..9ec4723 100644
26452--- a/arch/x86/kernel/pci-swiotlb.c
26453+++ b/arch/x86/kernel/pci-swiotlb.c
26454@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26455 struct dma_attrs *attrs)
26456 {
26457 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26458- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26459+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26460 else
26461 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26462 }
26463diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26464index a388bb8..97064ad 100644
26465--- a/arch/x86/kernel/process.c
26466+++ b/arch/x86/kernel/process.c
26467@@ -38,7 +38,8 @@
26468 * section. Since TSS's are completely CPU-local, we want them
26469 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26470 */
26471-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26472+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26473+EXPORT_SYMBOL(init_tss);
26474
26475 #ifdef CONFIG_X86_64
26476 static DEFINE_PER_CPU(unsigned char, is_idle);
26477@@ -96,7 +97,7 @@ void arch_task_cache_init(void)
26478 task_xstate_cachep =
26479 kmem_cache_create("task_xstate", xstate_size,
26480 __alignof__(union thread_xstate),
26481- SLAB_PANIC | SLAB_NOTRACK, NULL);
26482+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26483 setup_xstate_comp();
26484 }
26485
26486@@ -110,7 +111,7 @@ void exit_thread(void)
26487 unsigned long *bp = t->io_bitmap_ptr;
26488
26489 if (bp) {
26490- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26491+ struct tss_struct *tss = init_tss + get_cpu();
26492
26493 t->io_bitmap_ptr = NULL;
26494 clear_thread_flag(TIF_IO_BITMAP);
26495@@ -130,6 +131,9 @@ void flush_thread(void)
26496 {
26497 struct task_struct *tsk = current;
26498
26499+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26500+ loadsegment(gs, 0);
26501+#endif
26502 flush_ptrace_hw_breakpoint(tsk);
26503 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26504 drop_init_fpu(tsk);
26505@@ -276,7 +280,7 @@ static void __exit_idle(void)
26506 void exit_idle(void)
26507 {
26508 /* idle loop has pid 0 */
26509- if (current->pid)
26510+ if (task_pid_nr(current))
26511 return;
26512 __exit_idle();
26513 }
26514@@ -329,7 +333,7 @@ bool xen_set_default_idle(void)
26515 return ret;
26516 }
26517 #endif
26518-void stop_this_cpu(void *dummy)
26519+__noreturn void stop_this_cpu(void *dummy)
26520 {
26521 local_irq_disable();
26522 /*
26523@@ -508,16 +512,37 @@ static int __init idle_setup(char *str)
26524 }
26525 early_param("idle", idle_setup);
26526
26527-unsigned long arch_align_stack(unsigned long sp)
26528+#ifdef CONFIG_PAX_RANDKSTACK
26529+void pax_randomize_kstack(struct pt_regs *regs)
26530 {
26531- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26532- sp -= get_random_int() % 8192;
26533- return sp & ~0xf;
26534-}
26535+ struct thread_struct *thread = &current->thread;
26536+ unsigned long time;
26537
26538-unsigned long arch_randomize_brk(struct mm_struct *mm)
26539-{
26540- unsigned long range_end = mm->brk + 0x02000000;
26541- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26542-}
26543+ if (!randomize_va_space)
26544+ return;
26545+
26546+ if (v8086_mode(regs))
26547+ return;
26548
26549+ rdtscl(time);
26550+
26551+ /* P4 seems to return a 0 LSB, ignore it */
26552+#ifdef CONFIG_MPENTIUM4
26553+ time &= 0x3EUL;
26554+ time <<= 2;
26555+#elif defined(CONFIG_X86_64)
26556+ time &= 0xFUL;
26557+ time <<= 4;
26558+#else
26559+ time &= 0x1FUL;
26560+ time <<= 3;
26561+#endif
26562+
26563+ thread->sp0 ^= time;
26564+ load_sp0(init_tss + smp_processor_id(), thread);
26565+
26566+#ifdef CONFIG_X86_64
26567+ this_cpu_write(kernel_stack, thread->sp0);
26568+#endif
26569+}
26570+#endif
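
arch_align_stack() and arch_randomize_brk() are replaced by pax_randomize_kstack(), which XORs a few TSC-derived bits into thread->sp0 on the way out of a syscall so the kernel stack base moves between system calls. A sketch of just the entropy step, with masks and shifts taken from the hunk above (Pentium 4 parts return a zero TSC LSB, hence the wider mask and smaller shift):

#include <stdint.h>

static uintptr_t kstack_jitter(uint32_t tsc_lo)
{
#if defined(__x86_64__)
	return (uintptr_t)(tsc_lo & 0x0FUL) << 4;  /* 16 slots, 16-byte steps */
#elif defined(CONFIG_MPENTIUM4)
	return (uintptr_t)(tsc_lo & 0x3EUL) << 2;  /* skip the always-zero LSB */
#else
	return (uintptr_t)(tsc_lo & 0x1FUL) << 3;  /* 32 slots, 8-byte steps */
#endif
}

/* usage, mirroring the patch: thread->sp0 ^= kstack_jitter(tsc_lo);
 * followed by load_sp0() so the TSS picks up the new kernel stack top */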
26571diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26572index 603c4f9..3a105d7 100644
26573--- a/arch/x86/kernel/process_32.c
26574+++ b/arch/x86/kernel/process_32.c
26575@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26576 unsigned long thread_saved_pc(struct task_struct *tsk)
26577 {
26578 return ((unsigned long *)tsk->thread.sp)[3];
26579+//XXX return tsk->thread.eip;
26580 }
26581
26582 void __show_regs(struct pt_regs *regs, int all)
26583@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26584 unsigned long sp;
26585 unsigned short ss, gs;
26586
26587- if (user_mode_vm(regs)) {
26588+ if (user_mode(regs)) {
26589 sp = regs->sp;
26590 ss = regs->ss & 0xffff;
26591- gs = get_user_gs(regs);
26592 } else {
26593 sp = kernel_stack_pointer(regs);
26594 savesegment(ss, ss);
26595- savesegment(gs, gs);
26596 }
26597+ gs = get_user_gs(regs);
26598
26599 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26600 (u16)regs->cs, regs->ip, regs->flags,
26601- smp_processor_id());
26602+ raw_smp_processor_id());
26603 print_symbol("EIP is at %s\n", regs->ip);
26604
26605 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26606@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26607 int copy_thread(unsigned long clone_flags, unsigned long sp,
26608 unsigned long arg, struct task_struct *p)
26609 {
26610- struct pt_regs *childregs = task_pt_regs(p);
26611+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26612 struct task_struct *tsk;
26613 int err;
26614
26615 p->thread.sp = (unsigned long) childregs;
26616 p->thread.sp0 = (unsigned long) (childregs+1);
26617+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26618 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26619
26620 if (unlikely(p->flags & PF_KTHREAD)) {
26621 /* kernel thread */
26622 memset(childregs, 0, sizeof(struct pt_regs));
26623 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26624- task_user_gs(p) = __KERNEL_STACK_CANARY;
26625- childregs->ds = __USER_DS;
26626- childregs->es = __USER_DS;
26627+ savesegment(gs, childregs->gs);
26628+ childregs->ds = __KERNEL_DS;
26629+ childregs->es = __KERNEL_DS;
26630 childregs->fs = __KERNEL_PERCPU;
26631 childregs->bx = sp; /* function */
26632 childregs->bp = arg;
26633@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26634 struct thread_struct *prev = &prev_p->thread,
26635 *next = &next_p->thread;
26636 int cpu = smp_processor_id();
26637- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26638+ struct tss_struct *tss = init_tss + cpu;
26639 fpu_switch_t fpu;
26640
26641 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26642@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26643 */
26644 lazy_save_gs(prev->gs);
26645
26646+#ifdef CONFIG_PAX_MEMORY_UDEREF
26647+ __set_fs(task_thread_info(next_p)->addr_limit);
26648+#endif
26649+
26650 /*
26651 * Load the per-thread Thread-Local Storage descriptor.
26652 */
26653@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26654 */
26655 arch_end_context_switch(next_p);
26656
26657- this_cpu_write(kernel_stack,
26658- (unsigned long)task_stack_page(next_p) +
26659- THREAD_SIZE - KERNEL_STACK_OFFSET);
26660+ this_cpu_write(current_task, next_p);
26661+ this_cpu_write(current_tinfo, &next_p->tinfo);
26662+ this_cpu_write(kernel_stack, next->sp0);
26663
26664 /*
26665 * Restore %gs if needed (which is common)
26666@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26667
26668 switch_fpu_finish(next_p, fpu);
26669
26670- this_cpu_write(current_task, next_p);
26671-
26672 return prev_p;
26673 }
26674
26675@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26676 } while (count++ < 16);
26677 return 0;
26678 }
26679-
26680diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26681index 67fcc43..0d2c630 100644
26682--- a/arch/x86/kernel/process_64.c
26683+++ b/arch/x86/kernel/process_64.c
26684@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26685 struct pt_regs *childregs;
26686 struct task_struct *me = current;
26687
26688- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26689+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26690 childregs = task_pt_regs(p);
26691 p->thread.sp = (unsigned long) childregs;
26692 p->thread.usersp = me->thread.usersp;
26693+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26694 set_tsk_thread_flag(p, TIF_FORK);
26695 p->thread.io_bitmap_ptr = NULL;
26696
26697@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26698 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26699 savesegment(es, p->thread.es);
26700 savesegment(ds, p->thread.ds);
26701+ savesegment(ss, p->thread.ss);
26702+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26703 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26704
26705 if (unlikely(p->flags & PF_KTHREAD)) {
26706@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26707 struct thread_struct *prev = &prev_p->thread;
26708 struct thread_struct *next = &next_p->thread;
26709 int cpu = smp_processor_id();
26710- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26711+ struct tss_struct *tss = init_tss + cpu;
26712 unsigned fsindex, gsindex;
26713 fpu_switch_t fpu;
26714
26715@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26716 if (unlikely(next->ds | prev->ds))
26717 loadsegment(ds, next->ds);
26718
26719+ savesegment(ss, prev->ss);
26720+ if (unlikely(next->ss != prev->ss))
26721+ loadsegment(ss, next->ss);
26722+
26723 /*
26724 * Switch FS and GS.
26725 *
26726@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26727 prev->usersp = this_cpu_read(old_rsp);
26728 this_cpu_write(old_rsp, next->usersp);
26729 this_cpu_write(current_task, next_p);
26730+ this_cpu_write(current_tinfo, &next_p->tinfo);
26731
26732 /*
26733 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26734@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26735 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26736 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26737
26738- this_cpu_write(kernel_stack,
26739- (unsigned long)task_stack_page(next_p) +
26740- THREAD_SIZE - KERNEL_STACK_OFFSET);
26741+ this_cpu_write(kernel_stack, next->sp0);
26742
26743 /*
26744 * Now maybe reload the debug registers and handle I/O bitmaps
26745@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26746 if (!p || p == current || p->state == TASK_RUNNING)
26747 return 0;
26748 stack = (unsigned long)task_stack_page(p);
26749- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26750+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26751 return 0;
26752 fp = *(u64 *)(p->thread.sp);
26753 do {
26754- if (fp < (unsigned long)stack ||
26755- fp >= (unsigned long)stack+THREAD_SIZE)
26756+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26757 return 0;
26758 ip = *(u64 *)(fp+8);
26759 if (!in_sched_functions(ip))
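
The get_wchan() hunk above tightens the stack-walk bounds: the patch reserves 16 bytes at the top of the kernel stack (sp0 now sits at THREAD_SIZE - 16), so a saved frame pointer is only trusted if fp and the word at fp+8 both stay inside the allocation. A small sketch of the bound; THREAD_SIZE_DEMO is a stand-in value, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE_DEMO 4096UL   /* stand-in; the real value is arch-specific */

/* mirror of the tightened test: reject fp unless the frame walker's
 * reads at fp and fp+8 remain inside the stack allocation */
static int fp_in_bounds(uintptr_t stack, uintptr_t fp)
{
    return fp >= stack &&
           fp <= stack + THREAD_SIZE_DEMO - 16 - sizeof(uint64_t);
}

int main(void)
{
    uint8_t stack_mem[THREAD_SIZE_DEMO];
    uintptr_t stack = (uintptr_t)stack_mem;

    printf("%d\n", fp_in_bounds(stack, stack));                        /* 1 */
    printf("%d\n", fp_in_bounds(stack, stack + THREAD_SIZE_DEMO - 8)); /* 0 */
    return 0;
}
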
26760diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26761index e510618..5165ac0 100644
26762--- a/arch/x86/kernel/ptrace.c
26763+++ b/arch/x86/kernel/ptrace.c
26764@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26765 unsigned long sp = (unsigned long)&regs->sp;
26766 u32 *prev_esp;
26767
26768- if (context == (sp & ~(THREAD_SIZE - 1)))
26769+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26770 return sp;
26771
26772- prev_esp = (u32 *)(context);
26773+ prev_esp = *(u32 **)(context);
26774 if (prev_esp)
26775 return (unsigned long)prev_esp;
26776
26777@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26778 if (child->thread.gs != value)
26779 return do_arch_prctl(child, ARCH_SET_GS, value);
26780 return 0;
26781+
26782+ case offsetof(struct user_regs_struct,ip):
26783+ /*
26784+ * Protect against any attempt to set ip to an
26785+ * impossible address. There are dragons lurking if the
26786+ * address is noncanonical. (This explicitly allows
26787+ * setting ip to TASK_SIZE_MAX, because user code can do
26788+ * that all by itself by running off the end of its
26789+ * address space.)
26790+ */
26791+ if (value > TASK_SIZE_MAX)
26792+ return -EIO;
26793+ break;
26794+
26795 #endif
26796 }
26797
26798@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26799 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26800 {
26801 int i;
26802- int dr7 = 0;
26803+ unsigned long dr7 = 0;
26804 struct arch_hw_breakpoint *info;
26805
26806 for (i = 0; i < HBP_NUM; i++) {
26807@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26808 unsigned long addr, unsigned long data)
26809 {
26810 int ret;
26811- unsigned long __user *datap = (unsigned long __user *)data;
26812+ unsigned long __user *datap = (__force unsigned long __user *)data;
26813
26814 switch (request) {
26815 /* read the word at location addr in the USER area. */
26816@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26817 if ((int) addr < 0)
26818 return -EIO;
26819 ret = do_get_thread_area(child, addr,
26820- (struct user_desc __user *)data);
26821+ (__force struct user_desc __user *) data);
26822 break;
26823
26824 case PTRACE_SET_THREAD_AREA:
26825 if ((int) addr < 0)
26826 return -EIO;
26827 ret = do_set_thread_area(child, addr,
26828- (struct user_desc __user *)data, 0);
26829+ (__force struct user_desc __user *) data, 0);
26830 break;
26831 #endif
26832
26833@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26834
26835 #ifdef CONFIG_X86_64
26836
26837-static struct user_regset x86_64_regsets[] __read_mostly = {
26838+static user_regset_no_const x86_64_regsets[] __read_only = {
26839 [REGSET_GENERAL] = {
26840 .core_note_type = NT_PRSTATUS,
26841 .n = sizeof(struct user_regs_struct) / sizeof(long),
26842@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26843 #endif /* CONFIG_X86_64 */
26844
26845 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26846-static struct user_regset x86_32_regsets[] __read_mostly = {
26847+static user_regset_no_const x86_32_regsets[] __read_only = {
26848 [REGSET_GENERAL] = {
26849 .core_note_type = NT_PRSTATUS,
26850 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26851@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26852 */
26853 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26854
26855-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26856+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26857 {
26858 #ifdef CONFIG_X86_64
26859 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26860@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26861 memset(info, 0, sizeof(*info));
26862 info->si_signo = SIGTRAP;
26863 info->si_code = si_code;
26864- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26865+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26866 }
26867
26868 void user_single_step_siginfo(struct task_struct *tsk,
26869@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26870 }
26871 }
26872
26873+#ifdef CONFIG_GRKERNSEC_SETXID
26874+extern void gr_delayed_cred_worker(void);
26875+#endif
26876+
26877 /*
26878 * We can return 0 to resume the syscall or anything else to go to phase
26879 * 2. If we resume the syscall, we need to put something appropriate in
26880@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26881
26882 BUG_ON(regs != task_pt_regs(current));
26883
26884+#ifdef CONFIG_GRKERNSEC_SETXID
26885+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26886+ gr_delayed_cred_worker();
26887+#endif
26888+
26889 /*
26890 * If we stepped into a sysenter/syscall insn, it trapped in
26891 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26892@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26893 */
26894 user_exit();
26895
26896+#ifdef CONFIG_GRKERNSEC_SETXID
26897+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26898+ gr_delayed_cred_worker();
26899+#endif
26900+
26901 audit_syscall_exit(regs);
26902
26903 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
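
Alongside the grsecurity hooks, the ptrace.c hunk adds a guard in putreg(): a tracer may not set the tracee's ip beyond TASK_SIZE_MAX, because returning to a noncanonical rip planted through ptrace is a known hazard on x86-64. A sketch of the guard; TASK_SIZE_MAX_DEMO is an illustrative 47-bit-style constant, not the kernel macro.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define TASK_SIZE_MAX_DEMO ((1ULL << 47) - 4096)   /* illustrative limit */

/* refuse an impossible instruction pointer, as the hunk above does */
static int validate_ip(uint64_t value)
{
    if (value > TASK_SIZE_MAX_DEMO)
        return -EIO;
    return 0;
}

int main(void)
{
    printf("%d\n", validate_ip(0x400000));              /* 0: plausible text address */
    printf("%d\n", validate_ip(0xffff800000000000ULL)); /* -EIO: noncanonical */
    return 0;
}
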
26904diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26905index e5ecd20..60f7eef 100644
26906--- a/arch/x86/kernel/pvclock.c
26907+++ b/arch/x86/kernel/pvclock.c
26908@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26909 reset_hung_task_detector();
26910 }
26911
26912-static atomic64_t last_value = ATOMIC64_INIT(0);
26913+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26914
26915 void pvclock_resume(void)
26916 {
26917- atomic64_set(&last_value, 0);
26918+ atomic64_set_unchecked(&last_value, 0);
26919 }
26920
26921 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26922@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26923 * updating at the same time, and one of them could be slightly behind,
26924 * making the assumption that last_value always goes forward fail to hold.
26925 */
26926- last = atomic64_read(&last_value);
26927+ last = atomic64_read_unchecked(&last_value);
26928 do {
26929 if (ret < last)
26930 return last;
26931- last = atomic64_cmpxchg(&last_value, last, ret);
26932+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26933 } while (unlikely(last != ret));
26934
26935 return ret;
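
The pvclock hunk only moves last_value over to the *_unchecked atomics, telling the PaX refcount instrumentation that this counter is a timestamp rather than an object lifetime. The surrounding loop is still worth seeing in isolation: it clamps reads to be monotonic across vCPUs without taking a lock. A userspace C11 sketch of the same idiom:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

/* publish ret only if no other CPU already published something newer;
 * otherwise return the newer value so time never runs backwards */
static uint64_t monotonic_clamp(uint64_t ret)
{
    uint64_t last = atomic_load(&last_value);

    do {
        if (ret < last)
            return last;   /* someone else saw a later timestamp */
        /* on failure, last is reloaded and the check above reruns */
    } while (!atomic_compare_exchange_weak(&last_value, &last, ret));

    return ret;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)monotonic_clamp(100)); /* 100 */
    printf("%llu\n", (unsigned long long)monotonic_clamp(90));  /* 100: clamped */
    return 0;
}
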
26936diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26937index 86db4bc..a50a54a 100644
26938--- a/arch/x86/kernel/reboot.c
26939+++ b/arch/x86/kernel/reboot.c
26940@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26941
26942 void __noreturn machine_real_restart(unsigned int type)
26943 {
26944+
26945+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26946+ struct desc_struct *gdt;
26947+#endif
26948+
26949 local_irq_disable();
26950
26951 /*
26952@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26953
26954 /* Jump to the identity-mapped low memory code */
26955 #ifdef CONFIG_X86_32
26956- asm volatile("jmpl *%0" : :
26957+
26958+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26959+ gdt = get_cpu_gdt_table(smp_processor_id());
26960+ pax_open_kernel();
26961+#ifdef CONFIG_PAX_MEMORY_UDEREF
26962+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26963+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26964+ loadsegment(ds, __KERNEL_DS);
26965+ loadsegment(es, __KERNEL_DS);
26966+ loadsegment(ss, __KERNEL_DS);
26967+#endif
26968+#ifdef CONFIG_PAX_KERNEXEC
26969+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26970+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26971+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26972+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26973+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26974+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26975+#endif
26976+ pax_close_kernel();
26977+#endif
26978+
26979+ asm volatile("ljmpl *%0" : :
26980 "rm" (real_mode_header->machine_real_restart_asm),
26981 "a" (type));
26982 #else
26983@@ -137,7 +164,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
26984 /*
26985 * This is a single dmi_table handling all reboot quirks.
26986 */
26987-static struct dmi_system_id __initdata reboot_dmi_table[] = {
26988+static const struct dmi_system_id __initconst reboot_dmi_table[] = {
26989
26990 /* Acer */
26991 { /* Handle reboot issue on Acer Aspire one */
26992@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26993 * This means that this function can never return, it can misbehave
26994 * by not rebooting properly and hanging.
26995 */
26996-static void native_machine_emergency_restart(void)
26997+static void __noreturn native_machine_emergency_restart(void)
26998 {
26999 int i;
27000 int attempt = 0;
27001@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
27002 #endif
27003 }
27004
27005-static void __machine_emergency_restart(int emergency)
27006+static void __noreturn __machine_emergency_restart(int emergency)
27007 {
27008 reboot_emergency = emergency;
27009 machine_ops.emergency_restart();
27010 }
27011
27012-static void native_machine_restart(char *__unused)
27013+static void __noreturn native_machine_restart(char *__unused)
27014 {
27015 pr_notice("machine restart\n");
27016
27017@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27018 __machine_emergency_restart(0);
27019 }
27020
27021-static void native_machine_halt(void)
27022+static void __noreturn native_machine_halt(void)
27023 {
27024 /* Stop other cpus and apics */
27025 machine_shutdown();
27026@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27027 stop_this_cpu(NULL);
27028 }
27029
27030-static void native_machine_power_off(void)
27031+static void __noreturn native_machine_power_off(void)
27032 {
27033 if (pm_power_off) {
27034 if (!reboot_force)
27035@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27036 }
27037 /* A fallback in case there is no PM info available */
27038 tboot_shutdown(TB_SHUTDOWN_HALT);
27039+ unreachable();
27040 }
27041
27042-struct machine_ops machine_ops = {
27043+struct machine_ops machine_ops __read_only = {
27044 .power_off = native_machine_power_off,
27045 .shutdown = native_machine_shutdown,
27046 .emergency_restart = native_machine_emergency_restart,
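
Two annotations recur through the reboot.c hunk. __noreturn marks functions that never return, which lets the added unreachable() document the impossible path after tboot_shutdown(); __read_only moves the machine_ops function-pointer table into non-writable memory so its pointers cannot be overwritten at runtime. A plain-C sketch of both effects; all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

static void halt_demo(void) __attribute__((noreturn));

static void halt_demo(void)
{
    puts("halting");
    exit(0);
    __builtin_unreachable();   /* mirrors the unreachable() added above */
}

struct machine_ops_demo {
    void (*power_off)(void);
};

/* const places the table in .rodata, the userspace analogue of
 * __read_only: the pointer can no longer be redirected at runtime */
static const struct machine_ops_demo ops = {
    .power_off = halt_demo,
};

int main(void)
{
    ops.power_off();   /* never returns */
}
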
27047diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27048index c8e41e9..64049ef 100644
27049--- a/arch/x86/kernel/reboot_fixups_32.c
27050+++ b/arch/x86/kernel/reboot_fixups_32.c
27051@@ -57,7 +57,7 @@ struct device_fixup {
27052 unsigned int vendor;
27053 unsigned int device;
27054 void (*reboot_fixup)(struct pci_dev *);
27055-};
27056+} __do_const;
27057
27058 /*
27059 * PCI ids solely used for fixups_table go here
27060diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27061index 3fd2c69..a444264 100644
27062--- a/arch/x86/kernel/relocate_kernel_64.S
27063+++ b/arch/x86/kernel/relocate_kernel_64.S
27064@@ -96,8 +96,7 @@ relocate_kernel:
27065
27066 /* jump to identity mapped page */
27067 addq $(identity_mapped - relocate_kernel), %r8
27068- pushq %r8
27069- ret
27070+ jmp *%r8
27071
27072 identity_mapped:
27073 /* set return address to 0 if not preserving context */
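
The relocate_kernel change swaps a pushq/ret pair for an explicit indirect jump. The two are functionally identical, but a ret with no matching call desynchronizes the CPU's return-stack predictor and is the exact construct return-address hardening wants gone. In C terms it is a tail call through a pointer, which an optimizing compiler typically emits as a bare jmp; a minimal illustration:

#include <stdio.h>

static void target(void) { puts("landed"); }

/* with optimization this call is usually compiled as an indirect
 * tail call (jmp *%reg), the same form the hunk above switches to */
static void dispatch(void (*fn)(void))
{
    fn();
}

int main(void)
{
    dispatch(target);
    return 0;
}
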
27074diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27075index 0a2421c..11f3f36 100644
27076--- a/arch/x86/kernel/setup.c
27077+++ b/arch/x86/kernel/setup.c
27078@@ -111,6 +111,7 @@
27079 #include <asm/mce.h>
27080 #include <asm/alternative.h>
27081 #include <asm/prom.h>
27082+#include <asm/boot.h>
27083
27084 /*
27085 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27086@@ -206,10 +207,12 @@ EXPORT_SYMBOL(boot_cpu_data);
27087 #endif
27088
27089
27090-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27091-__visible unsigned long mmu_cr4_features;
27092+#ifdef CONFIG_X86_64
27093+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27094+#elif defined(CONFIG_X86_PAE)
27095+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27096 #else
27097-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27098+__visible unsigned long mmu_cr4_features __read_only;
27099 #endif
27100
27101 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27102@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
27103 * area (640->1Mb) as ram even though it is not.
27104 * take them out.
27105 */
27106- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27107+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27108
27109 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27110 }
27111@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
27112 /* called before trim_bios_range() to spare extra sanitize */
27113 static void __init e820_add_kernel_range(void)
27114 {
27115- u64 start = __pa_symbol(_text);
27116+ u64 start = __pa_symbol(ktla_ktva(_text));
27117 u64 size = __pa_symbol(_end) - start;
27118
27119 /*
27120@@ -855,8 +858,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27121
27122 void __init setup_arch(char **cmdline_p)
27123 {
27124+#ifdef CONFIG_X86_32
27125+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27126+#else
27127 memblock_reserve(__pa_symbol(_text),
27128 (unsigned long)__bss_stop - (unsigned long)_text);
27129+#endif
27130
27131 early_reserve_initrd();
27132
27133@@ -954,16 +961,16 @@ void __init setup_arch(char **cmdline_p)
27134
27135 if (!boot_params.hdr.root_flags)
27136 root_mountflags &= ~MS_RDONLY;
27137- init_mm.start_code = (unsigned long) _text;
27138- init_mm.end_code = (unsigned long) _etext;
27139+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27140+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27141 init_mm.end_data = (unsigned long) _edata;
27142 init_mm.brk = _brk_end;
27143
27144 mpx_mm_init(&init_mm);
27145
27146- code_resource.start = __pa_symbol(_text);
27147- code_resource.end = __pa_symbol(_etext)-1;
27148- data_resource.start = __pa_symbol(_etext);
27149+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27150+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27151+ data_resource.start = __pa_symbol(_sdata);
27152 data_resource.end = __pa_symbol(_edata)-1;
27153 bss_resource.start = __pa_symbol(__bss_start);
27154 bss_resource.end = __pa_symbol(__bss_stop)-1;
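
setup.c is one of many files in this patch that wraps _text and _etext in ktla_ktva(). Under KERNEXEC on i386 the kernel text's link-time (linear) addresses differ from its runtime virtual addresses by a constant, so code that mixes symbol addresses with runtime pointers must convert explicitly. A toy version of the conversion pair; the offset and addresses are invented (the real offset comes from the vmlinux.lds.S changes later in this patch).

#include <stdint.h>
#include <stdio.h>

#define KERNEL_TEXT_OFFSET_DEMO 0x01000000UL   /* invented for illustration */

static uintptr_t ktla_ktva_demo(uintptr_t addr)  /* link-time -> runtime */
{
    return addr + KERNEL_TEXT_OFFSET_DEMO;
}

static uintptr_t ktva_ktla_demo(uintptr_t addr)  /* runtime -> link-time */
{
    return addr - KERNEL_TEXT_OFFSET_DEMO;
}

int main(void)
{
    uintptr_t text = 0xC1000000UL;   /* pretend link-time address of _text */
    uintptr_t va = ktla_ktva_demo(text);

    printf("%#lx -> %#lx\n", (unsigned long)text, (unsigned long)va);
    return ktva_ktla_demo(va) == text ? 0 : 1;
}
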
27155diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27156index e4fcb87..9c06c55 100644
27157--- a/arch/x86/kernel/setup_percpu.c
27158+++ b/arch/x86/kernel/setup_percpu.c
27159@@ -21,19 +21,17 @@
27160 #include <asm/cpu.h>
27161 #include <asm/stackprotector.h>
27162
27163-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27164+#ifdef CONFIG_SMP
27165+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27166 EXPORT_PER_CPU_SYMBOL(cpu_number);
27167+#endif
27168
27169-#ifdef CONFIG_X86_64
27170 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27171-#else
27172-#define BOOT_PERCPU_OFFSET 0
27173-#endif
27174
27175 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27176 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27177
27178-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27179+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27180 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27181 };
27182 EXPORT_SYMBOL(__per_cpu_offset);
27183@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27184 {
27185 #ifdef CONFIG_NEED_MULTIPLE_NODES
27186 pg_data_t *last = NULL;
27187- unsigned int cpu;
27188+ int cpu;
27189
27190 for_each_possible_cpu(cpu) {
27191 int node = early_cpu_to_node(cpu);
27192@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27193 {
27194 #ifdef CONFIG_X86_32
27195 struct desc_struct gdt;
27196+ unsigned long base = per_cpu_offset(cpu);
27197
27198- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27199- 0x2 | DESCTYPE_S, 0x8);
27200- gdt.s = 1;
27201+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27202+ 0x83 | DESCTYPE_S, 0xC);
27203 write_gdt_entry(get_cpu_gdt_table(cpu),
27204 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27205 #endif
27206@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27207 /* alrighty, percpu areas up and running */
27208 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27209 for_each_possible_cpu(cpu) {
27210+#ifdef CONFIG_CC_STACKPROTECTOR
27211+#ifdef CONFIG_X86_32
27212+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27213+#endif
27214+#endif
27215 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27216 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27217 per_cpu(cpu_number, cpu) = cpu;
27218@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27219 */
27220 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27221 #endif
27222+#ifdef CONFIG_CC_STACKPROTECTOR
27223+#ifdef CONFIG_X86_32
27224+ if (!cpu)
27225+ per_cpu(stack_canary.canary, cpu) = canary;
27226+#endif
27227+#endif
27228 /*
27229 * Up to this point, the boot CPU has been using .init.data
27230 * area. Reload any changed state for the boot CPU.
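
In the setup_percpu.c hunk the per-CPU GDT entry stops using the blanket 0xFFFFF limit: the segment is sized so it ends exactly at VMALLOC_END, and with 4 KiB granularity (the 0xC flags) the encoded limit is the number of whole pages minus one. The arithmetic, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_DEMO 12   /* 4 KiB granularity */

/* encoded limit = whole pages covered by [base, end) minus one */
static uint32_t seg_limit_pages(uintptr_t base, uintptr_t end)
{
    return (uint32_t)((end - base - 1) >> PAGE_SHIFT_DEMO);
}

int main(void)
{
    uintptr_t base = 0xC2000000UL;         /* pretend per-CPU segment base */
    uintptr_t vmalloc_end = 0xF7FFE000UL;  /* pretend VMALLOC_END */

    printf("limit = %#x pages\n", seg_limit_pages(base, vmalloc_end));
    return 0;
}
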
27231diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27232index e504246..ba10432 100644
27233--- a/arch/x86/kernel/signal.c
27234+++ b/arch/x86/kernel/signal.c
27235@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27236 * Align the stack pointer according to the i386 ABI,
27237 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27238 */
27239- sp = ((sp + 4) & -16ul) - 4;
27240+ sp = ((sp - 12) & -16ul) - 4;
27241 #else /* !CONFIG_X86_32 */
27242 sp = round_down(sp, 16) - 8;
27243 #endif
27244@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27245 }
27246
27247 if (current->mm->context.vdso)
27248- restorer = current->mm->context.vdso +
27249- selected_vdso32->sym___kernel_sigreturn;
27250+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27251 else
27252- restorer = &frame->retcode;
27253+ restorer = (void __user *)&frame->retcode;
27254 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27255 restorer = ksig->ka.sa.sa_restorer;
27256
27257@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27258 * reasons and because gdb uses it as a signature to notice
27259 * signal handler stack frames.
27260 */
27261- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27262+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27263
27264 if (err)
27265 return -EFAULT;
27266@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27267 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27268
27269 /* Set up to return from userspace. */
27270- restorer = current->mm->context.vdso +
27271- selected_vdso32->sym___kernel_rt_sigreturn;
27272+ if (current->mm->context.vdso)
27273+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27274+ else
27275+ restorer = (void __user *)&frame->retcode;
27276 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27277 restorer = ksig->ka.sa.sa_restorer;
27278 put_user_ex(restorer, &frame->pretcode);
27279@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27280 * reasons and because gdb uses it as a signature to notice
27281 * signal handler stack frames.
27282 */
27283- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27284+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27285 } put_user_catch(err);
27286
27287 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27288@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27289 {
27290 int usig = signr_convert(ksig->sig);
27291 sigset_t *set = sigmask_to_save();
27292- compat_sigset_t *cset = (compat_sigset_t *) set;
27293+ sigset_t sigcopy;
27294+ compat_sigset_t *cset;
27295+
27296+ sigcopy = *set;
27297+
27298+ cset = (compat_sigset_t *) &sigcopy;
27299
27300 /* Set up the stack frame */
27301 if (is_ia32_frame()) {
27302@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27303 } else if (is_x32_frame()) {
27304 return x32_setup_rt_frame(ksig, cset, regs);
27305 } else {
27306- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27307+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27308 }
27309 }
27310
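
The align_sigframe() change moves the i386 signal frame slightly lower. The ABI wants ((sp + 4) & 15) == 0 on handler entry; the new expression keeps that property and additionally guarantees the frame lands strictly below the interrupted stack pointer. Both claims are pure arithmetic and easy to verify:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 4096; sp < 4096 + 64; sp++) {
        unsigned long aligned = ((sp - 12) & -16ul) - 4;

        assert(((aligned + 4) & 15) == 0);  /* ABI alignment holds */
        assert(aligned < sp);               /* strictly below original sp */
    }
    puts("alignment formula ok");
    return 0;
}
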
27311diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27312index be8e1bd..a3d93fa 100644
27313--- a/arch/x86/kernel/smp.c
27314+++ b/arch/x86/kernel/smp.c
27315@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27316
27317 __setup("nonmi_ipi", nonmi_ipi_setup);
27318
27319-struct smp_ops smp_ops = {
27320+struct smp_ops smp_ops __read_only = {
27321 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27322 .smp_prepare_cpus = native_smp_prepare_cpus,
27323 .smp_cpus_done = native_smp_cpus_done,
27324diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27325index febc6aa..37d8edf 100644
27326--- a/arch/x86/kernel/smpboot.c
27327+++ b/arch/x86/kernel/smpboot.c
27328@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
27329
27330 enable_start_cpu0 = 0;
27331
27332-#ifdef CONFIG_X86_32
27333+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27334+ barrier();
27335+
27336 /* switch away from the initial page table */
27337+#ifdef CONFIG_PAX_PER_CPU_PGD
27338+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27339+#else
27340 load_cr3(swapper_pg_dir);
27341+#endif
27342 __flush_tlb_all();
27343-#endif
27344
27345- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27346- barrier();
27347 /*
27348 * Check TSC synchronization with the BP:
27349 */
27350@@ -800,8 +803,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27351 alternatives_enable_smp();
27352
27353 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27354- (THREAD_SIZE + task_stack_page(idle))) - 1);
27355+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27356 per_cpu(current_task, cpu) = idle;
27357+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27358
27359 #ifdef CONFIG_X86_32
27360 /* Stack for startup_32 can be just as for start_secondary onwards */
27361@@ -810,10 +814,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27362 clear_tsk_thread_flag(idle, TIF_FORK);
27363 initial_gs = per_cpu_offset(cpu);
27364 #endif
27365- per_cpu(kernel_stack, cpu) =
27366- (unsigned long)task_stack_page(idle) -
27367- KERNEL_STACK_OFFSET + THREAD_SIZE;
27368+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27369+ pax_open_kernel();
27370 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27371+ pax_close_kernel();
27372 initial_code = (unsigned long)start_secondary;
27373 stack_start = idle->thread.sp;
27374
27375@@ -953,6 +957,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27376 /* the FPU context is blank, nobody can own it */
27377 __cpu_disable_lazy_restore(cpu);
27378
27379+#ifdef CONFIG_PAX_PER_CPU_PGD
27380+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27381+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27382+ KERNEL_PGD_PTRS);
27383+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27384+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27385+ KERNEL_PGD_PTRS);
27386+#endif
27387+
27388 err = do_boot_cpu(apicid, cpu, tidle);
27389 if (err) {
27390 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
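
The smpboot.c addition pre-populates a newly onlined CPU's private page directories under PAX_PER_CPU_PGD by copying the kernel half of swapper_pg_dir into them; clone_pgd_range() is, at heart, a memcpy over page-directory entries. A toy model with invented sizes and a fake entry format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD_DEMO 1024
#define KERNEL_PGD_BOUNDARY_DEMO 768   /* made-up user/kernel split */

typedef uint32_t pgd_demo_t;

/* copy a run of page-directory entries from the reference directory */
static void clone_pgd_range_demo(pgd_demo_t *dst, const pgd_demo_t *src,
                                 unsigned count)
{
    memcpy(dst, src, count * sizeof(*dst));
}

int main(void)
{
    static pgd_demo_t swapper[PTRS_PER_PGD_DEMO];
    static pgd_demo_t cpu_pgd[PTRS_PER_PGD_DEMO];

    for (unsigned i = 0; i < PTRS_PER_PGD_DEMO; i++)
        swapper[i] = (i << 1) | 1;   /* fake "present" entries */

    clone_pgd_range_demo(cpu_pgd + KERNEL_PGD_BOUNDARY_DEMO,
                         swapper + KERNEL_PGD_BOUNDARY_DEMO,
                         PTRS_PER_PGD_DEMO - KERNEL_PGD_BOUNDARY_DEMO);

    printf("kernel entries cloned: %s\n",
           cpu_pgd[KERNEL_PGD_BOUNDARY_DEMO] == swapper[KERNEL_PGD_BOUNDARY_DEMO]
               ? "yes" : "no");
    return 0;
}
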
27391diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27392index 9b4d51d..5d28b58 100644
27393--- a/arch/x86/kernel/step.c
27394+++ b/arch/x86/kernel/step.c
27395@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27396 struct desc_struct *desc;
27397 unsigned long base;
27398
27399- seg &= ~7UL;
27400+ seg >>= 3;
27401
27402 mutex_lock(&child->mm->context.lock);
27403- if (unlikely((seg >> 3) >= child->mm->context.size))
27404+ if (unlikely(seg >= child->mm->context.size))
27405 addr = -1L; /* bogus selector, access would fault */
27406 else {
27407 desc = child->mm->context.ldt + seg;
27408@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27409 addr += base;
27410 }
27411 mutex_unlock(&child->mm->context.lock);
27412- }
27413+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27414+ addr = ktla_ktva(addr);
27415
27416 return addr;
27417 }
27418@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27419 unsigned char opcode[15];
27420 unsigned long addr = convert_ip_to_linear(child, regs);
27421
27422+ if (addr == -EINVAL)
27423+ return 0;
27424+
27425 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27426 for (i = 0; i < copied; i++) {
27427 switch (opcode[i]) {
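
In step.c, convert_ip_to_linear() replaces `seg &= ~7` plus a later shift with a single `seg >>= 3`. An x86 selector packs index << 3 | table-indicator | RPL, so shifting right by three yields the descriptor index directly, and that index is what gets bounds-checked against the LDT size. Decoding the fields in plain C:

#include <stdint.h>
#include <stdio.h>

struct selector_fields {
    unsigned rpl;     /* bits 1:0  - requested privilege level */
    unsigned ti;      /* bit 2     - 0 = GDT, 1 = LDT */
    unsigned index;   /* bits 15:3 - descriptor index */
};

static struct selector_fields decode_selector(uint16_t seg)
{
    return (struct selector_fields){
        .rpl   = seg & 3,
        .ti    = (seg >> 2) & 1,
        .index = seg >> 3,
    };
}

int main(void)
{
    /* 0x7b is a typical i386 user-mode selector: index 15, GDT, RPL 3 */
    struct selector_fields f = decode_selector(0x007b);

    printf("index=%u ti=%u rpl=%u\n", f.index, f.ti, f.rpl);
    return 0;
}
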
27428diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27429new file mode 100644
27430index 0000000..5877189
27431--- /dev/null
27432+++ b/arch/x86/kernel/sys_i386_32.c
27433@@ -0,0 +1,189 @@
27434+/*
27435+ * This file contains various random system calls that
27436+ * have a non-standard calling sequence on the Linux/i386
27437+ * platform.
27438+ */
27439+
27440+#include <linux/errno.h>
27441+#include <linux/sched.h>
27442+#include <linux/mm.h>
27443+#include <linux/fs.h>
27444+#include <linux/smp.h>
27445+#include <linux/sem.h>
27446+#include <linux/msg.h>
27447+#include <linux/shm.h>
27448+#include <linux/stat.h>
27449+#include <linux/syscalls.h>
27450+#include <linux/mman.h>
27451+#include <linux/file.h>
27452+#include <linux/utsname.h>
27453+#include <linux/ipc.h>
27454+#include <linux/elf.h>
27455+
27456+#include <linux/uaccess.h>
27457+#include <linux/unistd.h>
27458+
27459+#include <asm/syscalls.h>
27460+
27461+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27462+{
27463+ unsigned long pax_task_size = TASK_SIZE;
27464+
27465+#ifdef CONFIG_PAX_SEGMEXEC
27466+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27467+ pax_task_size = SEGMEXEC_TASK_SIZE;
27468+#endif
27469+
27470+ if (flags & MAP_FIXED)
27471+ if (len > pax_task_size || addr > pax_task_size - len)
27472+ return -EINVAL;
27473+
27474+ return 0;
27475+}
27476+
27477+/*
27478+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27479+ */
27480+static unsigned long get_align_mask(void)
27481+{
27482+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27483+ return 0;
27484+
27485+ if (!(current->flags & PF_RANDOMIZE))
27486+ return 0;
27487+
27488+ return va_align.mask;
27489+}
27490+
27491+unsigned long
27492+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27493+ unsigned long len, unsigned long pgoff, unsigned long flags)
27494+{
27495+ struct mm_struct *mm = current->mm;
27496+ struct vm_area_struct *vma;
27497+ unsigned long pax_task_size = TASK_SIZE;
27498+ struct vm_unmapped_area_info info;
27499+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27500+
27501+#ifdef CONFIG_PAX_SEGMEXEC
27502+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27503+ pax_task_size = SEGMEXEC_TASK_SIZE;
27504+#endif
27505+
27506+ pax_task_size -= PAGE_SIZE;
27507+
27508+ if (len > pax_task_size)
27509+ return -ENOMEM;
27510+
27511+ if (flags & MAP_FIXED)
27512+ return addr;
27513+
27514+#ifdef CONFIG_PAX_RANDMMAP
27515+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27516+#endif
27517+
27518+ if (addr) {
27519+ addr = PAGE_ALIGN(addr);
27520+ if (pax_task_size - len >= addr) {
27521+ vma = find_vma(mm, addr);
27522+ if (check_heap_stack_gap(vma, addr, len, offset))
27523+ return addr;
27524+ }
27525+ }
27526+
27527+ info.flags = 0;
27528+ info.length = len;
27529+ info.align_mask = filp ? get_align_mask() : 0;
27530+ info.align_offset = pgoff << PAGE_SHIFT;
27531+ info.threadstack_offset = offset;
27532+
27533+#ifdef CONFIG_PAX_PAGEEXEC
27534+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27535+ info.low_limit = 0x00110000UL;
27536+ info.high_limit = mm->start_code;
27537+
27538+#ifdef CONFIG_PAX_RANDMMAP
27539+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27540+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27541+#endif
27542+
27543+ if (info.low_limit < info.high_limit) {
27544+ addr = vm_unmapped_area(&info);
27545+ if (!IS_ERR_VALUE(addr))
27546+ return addr;
27547+ }
27548+ } else
27549+#endif
27550+
27551+ info.low_limit = mm->mmap_base;
27552+ info.high_limit = pax_task_size;
27553+
27554+ return vm_unmapped_area(&info);
27555+}
27556+
27557+unsigned long
27558+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27559+ const unsigned long len, const unsigned long pgoff,
27560+ const unsigned long flags)
27561+{
27562+ struct vm_area_struct *vma;
27563+ struct mm_struct *mm = current->mm;
27564+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27565+ struct vm_unmapped_area_info info;
27566+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27567+
27568+#ifdef CONFIG_PAX_SEGMEXEC
27569+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27570+ pax_task_size = SEGMEXEC_TASK_SIZE;
27571+#endif
27572+
27573+ pax_task_size -= PAGE_SIZE;
27574+
27575+ /* requested length too big for entire address space */
27576+ if (len > pax_task_size)
27577+ return -ENOMEM;
27578+
27579+ if (flags & MAP_FIXED)
27580+ return addr;
27581+
27582+#ifdef CONFIG_PAX_PAGEEXEC
27583+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27584+ goto bottomup;
27585+#endif
27586+
27587+#ifdef CONFIG_PAX_RANDMMAP
27588+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27589+#endif
27590+
27591+ /* requesting a specific address */
27592+ if (addr) {
27593+ addr = PAGE_ALIGN(addr);
27594+ if (pax_task_size - len >= addr) {
27595+ vma = find_vma(mm, addr);
27596+ if (check_heap_stack_gap(vma, addr, len, offset))
27597+ return addr;
27598+ }
27599+ }
27600+
27601+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27602+ info.length = len;
27603+ info.low_limit = PAGE_SIZE;
27604+ info.high_limit = mm->mmap_base;
27605+ info.align_mask = filp ? get_align_mask() : 0;
27606+ info.align_offset = pgoff << PAGE_SHIFT;
27607+ info.threadstack_offset = offset;
27608+
27609+ addr = vm_unmapped_area(&info);
27610+ if (!(addr & ~PAGE_MASK))
27611+ return addr;
27612+ VM_BUG_ON(addr != -ENOMEM);
27613+
27614+bottomup:
27615+ /*
27616+ * A failed mmap() very likely causes application failure,
27617+ * so fall back to the bottom-up function here. This scenario
27618+ * can happen with large stack limits and large mmap()
27619+ * allocations.
27620+ */
27621+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27622+}
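
One detail of the new sys_i386_32.c worth calling out: the MAP_FIXED validation in i386_mmap_check() is written as `len > pax_task_size || addr > pax_task_size - len` precisely because the naive `addr + len > task_size` can wrap on 32-bit and wave a bogus request through. The same check in standalone form, with the i386 default 3 GiB limit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* overflow-safe range check: never computes addr + len */
static bool fixed_map_ok(uintptr_t addr, size_t len, uintptr_t task_size)
{
    if (len > task_size || addr > task_size - len)
        return false;
    return true;
}

int main(void)
{
    const uintptr_t task_size = 0xC0000000u;   /* 3 GiB */

    printf("%d\n", fixed_map_ok(0xBFFFE000u, 0x2000, task_size)); /* 1: ends at limit */
    printf("%d\n", fixed_map_ok(0xBFFFE000u, 0x3000, task_size)); /* 0: crosses limit */
    /* on 32-bit the naive sum 0xFFFFF000 + 0x2000 wraps to 0x1000
     * and would falsely pass; this form still rejects it */
    printf("%d\n", fixed_map_ok(0xFFFFF000u, 0x2000, task_size)); /* 0 */
    return 0;
}
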
27623diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27624index 30277e2..5664a29 100644
27625--- a/arch/x86/kernel/sys_x86_64.c
27626+++ b/arch/x86/kernel/sys_x86_64.c
27627@@ -81,8 +81,8 @@ out:
27628 return error;
27629 }
27630
27631-static void find_start_end(unsigned long flags, unsigned long *begin,
27632- unsigned long *end)
27633+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27634+ unsigned long *begin, unsigned long *end)
27635 {
27636 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27637 unsigned long new_begin;
27638@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27639 *begin = new_begin;
27640 }
27641 } else {
27642- *begin = current->mm->mmap_legacy_base;
27643+ *begin = mm->mmap_legacy_base;
27644 *end = TASK_SIZE;
27645 }
27646 }
27647@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27648 struct vm_area_struct *vma;
27649 struct vm_unmapped_area_info info;
27650 unsigned long begin, end;
27651+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27652
27653 if (flags & MAP_FIXED)
27654 return addr;
27655
27656- find_start_end(flags, &begin, &end);
27657+ find_start_end(mm, flags, &begin, &end);
27658
27659 if (len > end)
27660 return -ENOMEM;
27661
27662+#ifdef CONFIG_PAX_RANDMMAP
27663+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27664+#endif
27665+
27666 if (addr) {
27667 addr = PAGE_ALIGN(addr);
27668 vma = find_vma(mm, addr);
27669- if (end - len >= addr &&
27670- (!vma || addr + len <= vma->vm_start))
27671+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27672 return addr;
27673 }
27674
27675@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27676 info.high_limit = end;
27677 info.align_mask = filp ? get_align_mask() : 0;
27678 info.align_offset = pgoff << PAGE_SHIFT;
27679+ info.threadstack_offset = offset;
27680 return vm_unmapped_area(&info);
27681 }
27682
27683@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27684 struct mm_struct *mm = current->mm;
27685 unsigned long addr = addr0;
27686 struct vm_unmapped_area_info info;
27687+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27688
27689 /* requested length too big for entire address space */
27690 if (len > TASK_SIZE)
27691@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27692 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27693 goto bottomup;
27694
27695+#ifdef CONFIG_PAX_RANDMMAP
27696+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27697+#endif
27698+
27699 /* requesting a specific address */
27700 if (addr) {
27701 addr = PAGE_ALIGN(addr);
27702 vma = find_vma(mm, addr);
27703- if (TASK_SIZE - len >= addr &&
27704- (!vma || addr + len <= vma->vm_start))
27705+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27706 return addr;
27707 }
27708
27709@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27710 info.high_limit = mm->mmap_base;
27711 info.align_mask = filp ? get_align_mask() : 0;
27712 info.align_offset = pgoff << PAGE_SHIFT;
27713+ info.threadstack_offset = offset;
27714 addr = vm_unmapped_area(&info);
27715 if (!(addr & ~PAGE_MASK))
27716 return addr;
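
Both x86-64 hunks above replace the open-coded `!vma || addr + len <= vma->vm_start` placement test with check_heap_stack_gap(), a grsecurity helper defined elsewhere in this patch that also enforces a randomized guard gap in front of the next VMA. A toy contrast between the two checks, with the gap passed explicitly instead of derived from the VMA:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vma_demo {
    uintptr_t vm_start;
};

/* the open-coded check the hunks above remove */
static bool fits_plain(const struct vma_demo *vma, uintptr_t addr, size_t len)
{
    return !vma || addr + len <= vma->vm_start;
}

/* gap-aware variant: keep a guard gap before the next mapping */
static bool fits_with_gap(const struct vma_demo *vma, uintptr_t addr,
                          size_t len, size_t gap)
{
    return !vma || addr + len + gap <= vma->vm_start;
}

int main(void)
{
    struct vma_demo next = { .vm_start = 0x500000 };

    printf("%d\n", fits_plain(&next, 0x4ff000, 0x1000));           /* 1: touches vm_start */
    printf("%d\n", fits_with_gap(&next, 0x4ff000, 0x1000, 4096));  /* 0: gap violated */
    return 0;
}
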
27717diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27718index 91a4496..42fc304 100644
27719--- a/arch/x86/kernel/tboot.c
27720+++ b/arch/x86/kernel/tboot.c
27721@@ -44,6 +44,7 @@
27722 #include <asm/setup.h>
27723 #include <asm/e820.h>
27724 #include <asm/io.h>
27725+#include <asm/tlbflush.h>
27726
27727 #include "../realmode/rm/wakeup.h"
27728
27729@@ -221,7 +222,7 @@ static int tboot_setup_sleep(void)
27730
27731 void tboot_shutdown(u32 shutdown_type)
27732 {
27733- void (*shutdown)(void);
27734+ void (* __noreturn shutdown)(void);
27735
27736 if (!tboot_enabled())
27737 return;
27738@@ -242,8 +243,9 @@ void tboot_shutdown(u32 shutdown_type)
27739 tboot->shutdown_type = shutdown_type;
27740
27741 switch_to_tboot_pt();
27742+ cr4_clear_bits(X86_CR4_PCIDE);
27743
27744- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27745+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27746 shutdown();
27747
27748 /* should not reach here */
27749@@ -310,7 +312,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27750 return -ENODEV;
27751 }
27752
27753-static atomic_t ap_wfs_count;
27754+static atomic_unchecked_t ap_wfs_count;
27755
27756 static int tboot_wait_for_aps(int num_aps)
27757 {
27758@@ -334,9 +336,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27759 {
27760 switch (action) {
27761 case CPU_DYING:
27762- atomic_inc(&ap_wfs_count);
27763+ atomic_inc_unchecked(&ap_wfs_count);
27764 if (num_online_cpus() == 1)
27765- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27766+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27767 return NOTIFY_BAD;
27768 break;
27769 }
27770@@ -422,7 +424,7 @@ static __init int tboot_late_init(void)
27771
27772 tboot_create_trampoline();
27773
27774- atomic_set(&ap_wfs_count, 0);
27775+ atomic_set_unchecked(&ap_wfs_count, 0);
27776 register_hotcpu_notifier(&tboot_cpu_notifier);
27777
27778 #ifdef CONFIG_DEBUG_FS
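
tboot's ap_wfs_count becomes atomic_unchecked_t for the same reason pvclock's last_value did earlier: under PAX_REFCOUNT every plain atomic increment is instrumented to detect overflow, and counters that are not object lifetimes opt out through the _unchecked variants. Conceptually the checked flavor behaves like the helper below; the kernel raises a trap where this sketch calls abort().

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* conceptual analogue of a PAX_REFCOUNT-checked increment */
static int checked_inc(int *v)
{
    int out;

    if (__builtin_add_overflow(*v, 1, &out)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();   /* the kernel version traps instead */
    }
    *v = out;
    return out;
}

int main(void)
{
    int refs = INT_MAX - 1;

    printf("%d\n", checked_inc(&refs));   /* INT_MAX: still fine */
    checked_inc(&refs);                   /* would overflow: aborts */
    return 0;
}
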
27779diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27780index 25adc0e..1df4349 100644
27781--- a/arch/x86/kernel/time.c
27782+++ b/arch/x86/kernel/time.c
27783@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27784 {
27785 unsigned long pc = instruction_pointer(regs);
27786
27787- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27788+ if (!user_mode(regs) && in_lock_functions(pc)) {
27789 #ifdef CONFIG_FRAME_POINTER
27790- return *(unsigned long *)(regs->bp + sizeof(long));
27791+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27792 #else
27793 unsigned long *sp =
27794 (unsigned long *)kernel_stack_pointer(regs);
27795@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27796 * or above a saved flags. Eflags has bits 22-31 zero,
27797 * kernel addresses don't.
27798 */
27799+
27800+#ifdef CONFIG_PAX_KERNEXEC
27801+ return ktla_ktva(sp[0]);
27802+#else
27803 if (sp[0] >> 22)
27804 return sp[0];
27805 if (sp[1] >> 22)
27806 return sp[1];
27807 #endif
27808+
27809+#endif
27810 }
27811 return pc;
27812 }
27813diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27814index 7fc5e84..c6e445a 100644
27815--- a/arch/x86/kernel/tls.c
27816+++ b/arch/x86/kernel/tls.c
27817@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27818 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27819 return -EINVAL;
27820
27821+#ifdef CONFIG_PAX_SEGMEXEC
27822+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27823+ return -EINVAL;
27824+#endif
27825+
27826 set_tls_desc(p, idx, &info, 1);
27827
27828 return 0;
27829@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27830
27831 if (kbuf)
27832 info = kbuf;
27833- else if (__copy_from_user(infobuf, ubuf, count))
27834+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27835 return -EFAULT;
27836 else
27837 info = infobuf;
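
The tls.c fix above is a textbook bounds check: regset_tls_set() copies a caller-controlled count of bytes into a fixed buffer, so the copy must refuse anything larger than the buffer. The shape of the guard in isolation:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static char infobuf[64];   /* stand-in for the function's fixed buffer */

/* reject oversized input before copying, as the added check does */
static int copy_bounded(const void *src, size_t count)
{
    if (count > sizeof infobuf)
        return -EFAULT;
    memcpy(infobuf, src, count);
    return 0;
}

int main(void)
{
    char src[128] = "hello";

    printf("%d\n", copy_bounded(src, 5));    /* 0: fits */
    printf("%d\n", copy_bounded(src, 128));  /* -EFAULT: too large */
    return 0;
}
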
27838diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27839index 1c113db..287b42e 100644
27840--- a/arch/x86/kernel/tracepoint.c
27841+++ b/arch/x86/kernel/tracepoint.c
27842@@ -9,11 +9,11 @@
27843 #include <linux/atomic.h>
27844
27845 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27846-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27847+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27848 (unsigned long) trace_idt_table };
27849
27850 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27851-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27852+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27853
27854 static int trace_irq_vector_refcount;
27855 static DEFINE_MUTEX(irq_vector_mutex);
27856diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27857index 4ff5d16..736e3e1 100644
27858--- a/arch/x86/kernel/traps.c
27859+++ b/arch/x86/kernel/traps.c
27860@@ -68,7 +68,7 @@
27861 #include <asm/proto.h>
27862
27863 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27864-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27865+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27866 #else
27867 #include <asm/processor-flags.h>
27868 #include <asm/setup.h>
27869@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27870 #endif
27871
27872 /* Must be page-aligned because the real IDT is used in a fixmap. */
27873-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27874+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27875
27876 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27877 EXPORT_SYMBOL_GPL(used_vectors);
27878@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
27879 {
27880 enum ctx_state prev_state;
27881
27882- if (user_mode_vm(regs)) {
27883+ if (user_mode(regs)) {
27884 /* Other than that, we're just an exception. */
27885 prev_state = exception_enter();
27886 } else {
27887@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27888 /* Must be before exception_exit. */
27889 preempt_count_sub(HARDIRQ_OFFSET);
27890
27891- if (user_mode_vm(regs))
27892+ if (user_mode(regs))
27893 return exception_exit(prev_state);
27894 else
27895 rcu_nmi_exit();
27896@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27897 *
27898 * IST exception handlers normally cannot schedule. As a special
27899 * exception, if the exception interrupted userspace code (i.e.
27900- * user_mode_vm(regs) would return true) and the exception was not
27901+ * user_mode(regs) would return true) and the exception was not
27902 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
27903 * begins a non-atomic section within an ist_enter()/ist_exit() region.
27904 * Callers are responsible for enabling interrupts themselves inside
27905@@ -167,7 +167,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27906 */
27907 void ist_begin_non_atomic(struct pt_regs *regs)
27908 {
27909- BUG_ON(!user_mode_vm(regs));
27910+ BUG_ON(!user_mode(regs));
27911
27912 /*
27913 * Sanity check: we need to be on the normal thread stack. This
27914@@ -191,11 +191,11 @@ void ist_end_non_atomic(void)
27915 }
27916
27917 static nokprobe_inline int
27918-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27919+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27920 struct pt_regs *regs, long error_code)
27921 {
27922 #ifdef CONFIG_X86_32
27923- if (regs->flags & X86_VM_MASK) {
27924+ if (v8086_mode(regs)) {
27925 /*
27926 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27927 * On nmi (interrupt 2), do_trap should not be called.
27928@@ -208,12 +208,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27929 return -1;
27930 }
27931 #endif
27932- if (!user_mode(regs)) {
27933+ if (!user_mode_novm(regs)) {
27934 if (!fixup_exception(regs)) {
27935 tsk->thread.error_code = error_code;
27936 tsk->thread.trap_nr = trapnr;
27937+
27938+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27939+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27940+ str = "PAX: suspicious stack segment fault";
27941+#endif
27942+
27943 die(str, regs, error_code);
27944 }
27945+
27946+#ifdef CONFIG_PAX_REFCOUNT
27947+ if (trapnr == X86_TRAP_OF)
27948+ pax_report_refcount_overflow(regs);
27949+#endif
27950+
27951 return 0;
27952 }
27953
27954@@ -252,7 +264,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27955 }
27956
27957 static void
27958-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27959+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27960 long error_code, siginfo_t *info)
27961 {
27962 struct task_struct *tsk = current;
27963@@ -276,7 +288,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27964 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27965 printk_ratelimit()) {
27966 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27967- tsk->comm, tsk->pid, str,
27968+ tsk->comm, task_pid_nr(tsk), str,
27969 regs->ip, regs->sp, error_code);
27970 print_vma_addr(" in ", regs->ip);
27971 pr_cont("\n");
27972@@ -358,6 +370,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27973 tsk->thread.error_code = error_code;
27974 tsk->thread.trap_nr = X86_TRAP_DF;
27975
27976+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27977+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27978+ die("grsec: kernel stack overflow detected", regs, error_code);
27979+#endif
27980+
27981 #ifdef CONFIG_DOUBLEFAULT
27982 df_debug(regs, error_code);
27983 #endif
27984@@ -384,7 +401,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
27985 goto exit;
27986 conditional_sti(regs);
27987
27988- if (!user_mode_vm(regs))
27989+ if (!user_mode(regs))
27990 die("bounds", regs, error_code);
27991
27992 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
27993@@ -463,7 +480,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27994 conditional_sti(regs);
27995
27996 #ifdef CONFIG_X86_32
27997- if (regs->flags & X86_VM_MASK) {
27998+ if (v8086_mode(regs)) {
27999 local_irq_enable();
28000 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28001 goto exit;
28002@@ -471,18 +488,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28003 #endif
28004
28005 tsk = current;
28006- if (!user_mode(regs)) {
28007+ if (!user_mode_novm(regs)) {
28008 if (fixup_exception(regs))
28009 goto exit;
28010
28011 tsk->thread.error_code = error_code;
28012 tsk->thread.trap_nr = X86_TRAP_GP;
28013 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28014- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28015+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28016+
28017+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28018+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28019+ die("PAX: suspicious general protection fault", regs, error_code);
28020+ else
28021+#endif
28022+
28023 die("general protection fault", regs, error_code);
28024+ }
28025 goto exit;
28026 }
28027
28028+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28029+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28030+ struct mm_struct *mm = tsk->mm;
28031+ unsigned long limit;
28032+
28033+ down_write(&mm->mmap_sem);
28034+ limit = mm->context.user_cs_limit;
28035+ if (limit < TASK_SIZE) {
28036+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28037+ up_write(&mm->mmap_sem);
28038+ return;
28039+ }
28040+ up_write(&mm->mmap_sem);
28041+ }
28042+#endif
28043+
28044 tsk->thread.error_code = error_code;
28045 tsk->thread.trap_nr = X86_TRAP_GP;
28046
28047@@ -581,13 +622,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28048 container_of(task_pt_regs(current),
28049 struct bad_iret_stack, regs);
28050
28051+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28052+ new_stack = s;
28053+
28054 /* Copy the IRET target to the new stack. */
28055 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28056
28057 /* Copy the remainder of the stack from the current stack. */
28058 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28059
28060- BUG_ON(!user_mode_vm(&new_stack->regs));
28061+ BUG_ON(!user_mode(&new_stack->regs));
28062 return new_stack;
28063 }
28064 NOKPROBE_SYMBOL(fixup_bad_iret);
28065@@ -637,7 +681,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28066 * then it's very likely the result of an icebp/int01 trap.
28067 * User wants a sigtrap for that.
28068 */
28069- if (!dr6 && user_mode_vm(regs))
28070+ if (!dr6 && user_mode(regs))
28071 user_icebp = 1;
28072
28073 /* Catch kmemcheck conditions first of all! */
28074@@ -673,7 +717,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28075 /* It's safe to allow irq's after DR6 has been saved */
28076 preempt_conditional_sti(regs);
28077
28078- if (regs->flags & X86_VM_MASK) {
28079+ if (v8086_mode(regs)) {
28080 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28081 X86_TRAP_DB);
28082 preempt_conditional_cli(regs);
28083@@ -688,7 +732,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28084 * We already checked v86 mode above, so we can check for kernel mode
28085 * by just checking the CPL of CS.
28086 */
28087- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28088+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28089 tsk->thread.debugreg6 &= ~DR_STEP;
28090 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28091 regs->flags &= ~X86_EFLAGS_TF;
28092@@ -721,7 +765,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28093 return;
28094 conditional_sti(regs);
28095
28096- if (!user_mode_vm(regs))
28097+ if (!user_mode(regs))
28098 {
28099 if (!fixup_exception(regs)) {
28100 task->thread.error_code = error_code;
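
Among the traps.c changes, the GRKERNSEC_KSTACKOVERFLOW hunk adds a heuristic to do_double_fault(): kernel stacks grow downward from task->stack + THREAD_SIZE, so a faulting sp within a page below the base of the allocation almost certainly means the stack was blown straight through. The comparison leans on unsigned wraparound to stay one-sided:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_DEMO 4096UL

/* sp just below the stack base => classic downward overflow; an sp
 * above the base makes base - sp wrap to a huge value and fail the
 * test, which is the intended behavior */
static bool looks_like_stack_overflow(uintptr_t stack_base, uintptr_t sp)
{
    return stack_base - sp <= PAGE_SIZE_DEMO;
}

int main(void)
{
    uintptr_t base = 0x7f0000000000UL;   /* pretend task->stack */

    printf("%d\n", looks_like_stack_overflow(base, base - 64));     /* 1 */
    printf("%d\n", looks_like_stack_overflow(base, base + 0x2000)); /* 0 */
    return 0;
}
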
28101diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28102index 5054497..139f8f8 100644
28103--- a/arch/x86/kernel/tsc.c
28104+++ b/arch/x86/kernel/tsc.c
28105@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28106 */
28107 smp_wmb();
28108
28109- ACCESS_ONCE(c2n->head) = data;
28110+ ACCESS_ONCE_RW(c2n->head) = data;
28111 }
28112
28113 /*
28114diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28115index 81f8adb0..fff670e 100644
28116--- a/arch/x86/kernel/uprobes.c
28117+++ b/arch/x86/kernel/uprobes.c
28118@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28119 int ret = NOTIFY_DONE;
28120
28121 /* We are only interested in userspace traps */
28122- if (regs && !user_mode_vm(regs))
28123+ if (regs && !user_mode(regs))
28124 return NOTIFY_DONE;
28125
28126 switch (val) {
28127@@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28128
28129 if (nleft != rasize) {
28130 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28131- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28132+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28133
28134 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28135 }
28136diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28137index b9242ba..50c5edd 100644
28138--- a/arch/x86/kernel/verify_cpu.S
28139+++ b/arch/x86/kernel/verify_cpu.S
28140@@ -20,6 +20,7 @@
28141 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28142 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28143 * arch/x86/kernel/head_32.S: processor startup
28144+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28145 *
28146 * verify_cpu, returns the status of longmode and SSE in register %eax.
28147 * 0: Success 1: Failure
28148diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28149index e8edcf5..27f9344 100644
28150--- a/arch/x86/kernel/vm86_32.c
28151+++ b/arch/x86/kernel/vm86_32.c
28152@@ -44,6 +44,7 @@
28153 #include <linux/ptrace.h>
28154 #include <linux/audit.h>
28155 #include <linux/stddef.h>
28156+#include <linux/grsecurity.h>
28157
28158 #include <asm/uaccess.h>
28159 #include <asm/io.h>
28160@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28161 do_exit(SIGSEGV);
28162 }
28163
28164- tss = &per_cpu(init_tss, get_cpu());
28165+ tss = init_tss + get_cpu();
28166 current->thread.sp0 = current->thread.saved_sp0;
28167 current->thread.sysenter_cs = __KERNEL_CS;
28168 load_sp0(tss, &current->thread);
28169@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28170
28171 if (tsk->thread.saved_sp0)
28172 return -EPERM;
28173+
28174+#ifdef CONFIG_GRKERNSEC_VM86
28175+ if (!capable(CAP_SYS_RAWIO)) {
28176+ gr_handle_vm86();
28177+ return -EPERM;
28178+ }
28179+#endif
28180+
28181 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28182 offsetof(struct kernel_vm86_struct, vm86plus) -
28183 sizeof(info.regs));
28184@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28185 int tmp;
28186 struct vm86plus_struct __user *v86;
28187
28188+#ifdef CONFIG_GRKERNSEC_VM86
28189+ if (!capable(CAP_SYS_RAWIO)) {
28190+ gr_handle_vm86();
28191+ return -EPERM;
28192+ }
28193+#endif
28194+
28195 tsk = current;
28196 switch (cmd) {
28197 case VM86_REQUEST_IRQ:
28198@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28199 tsk->thread.saved_fs = info->regs32->fs;
28200 tsk->thread.saved_gs = get_user_gs(info->regs32);
28201
28202- tss = &per_cpu(init_tss, get_cpu());
28203+ tss = init_tss + get_cpu();
28204 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28205 if (cpu_has_sep)
28206 tsk->thread.sysenter_cs = 0;
28207@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28208 goto cannot_handle;
28209 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28210 goto cannot_handle;
28211- intr_ptr = (unsigned long __user *) (i << 2);
28212+ intr_ptr = (__force unsigned long __user *) (i << 2);
28213 if (get_user(segoffs, intr_ptr))
28214 goto cannot_handle;
28215 if ((segoffs >> 16) == BIOSSEG)
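
Context for the do_int() cast change above: in vm86 mode, interrupt vector i's real-mode handler is fetched from the interrupt vector table at linear address i * 4, stored as a 16:16 segment:offset pair, which is why the kernel reads a u32 at (i << 2) and then splits it. The same lookup in miniature, with the table as a local array instead of guest memory:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t ivt[256] = { 0 };
    unsigned i = 0x21;

    ivt[i] = (0xF000u << 16) | 0x1234u;   /* fake segment:offset entry */

    uint32_t segoffs = ivt[i];            /* kernel: *(u32 *)(i << 2) */
    uint16_t seg = segoffs >> 16;
    uint16_t off = segoffs & 0xFFFF;

    printf("int 0x%02x -> %04x:%04x\n", i, (unsigned)seg, (unsigned)off);
    return 0;
}
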
28216diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28217index 00bf300..129df8e 100644
28218--- a/arch/x86/kernel/vmlinux.lds.S
28219+++ b/arch/x86/kernel/vmlinux.lds.S
28220@@ -26,6 +26,13 @@
28221 #include <asm/page_types.h>
28222 #include <asm/cache.h>
28223 #include <asm/boot.h>
28224+#include <asm/segment.h>
28225+
28226+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28227+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28228+#else
28229+#define __KERNEL_TEXT_OFFSET 0
28230+#endif
28231
28232 #undef i386 /* in case the preprocessor is a 32bit one */
28233
28234@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28235
28236 PHDRS {
28237 text PT_LOAD FLAGS(5); /* R_E */
28238+#ifdef CONFIG_X86_32
28239+ module PT_LOAD FLAGS(5); /* R_E */
28240+#endif
28241+#ifdef CONFIG_XEN
28242+ rodata PT_LOAD FLAGS(5); /* R_E */
28243+#else
28244+ rodata PT_LOAD FLAGS(4); /* R__ */
28245+#endif
28246 data PT_LOAD FLAGS(6); /* RW_ */
28247-#ifdef CONFIG_X86_64
28248+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28249 #ifdef CONFIG_SMP
28250 percpu PT_LOAD FLAGS(6); /* RW_ */
28251 #endif
28252+ text.init PT_LOAD FLAGS(5); /* R_E */
28253+ text.exit PT_LOAD FLAGS(5); /* R_E */
28254 init PT_LOAD FLAGS(7); /* RWE */
28255-#endif
28256 note PT_NOTE FLAGS(0); /* ___ */
28257 }
28258
28259 SECTIONS
28260 {
28261 #ifdef CONFIG_X86_32
28262- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28263- phys_startup_32 = startup_32 - LOAD_OFFSET;
28264+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28265 #else
28266- . = __START_KERNEL;
28267- phys_startup_64 = startup_64 - LOAD_OFFSET;
28268+ . = __START_KERNEL;
28269 #endif
28270
28271 /* Text and read-only data */
28272- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28273- _text = .;
28274+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28275 /* bootstrapping code */
28276+#ifdef CONFIG_X86_32
28277+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28278+#else
28279+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28280+#endif
28281+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28282+ _text = .;
28283 HEAD_TEXT
28284 . = ALIGN(8);
28285 _stext = .;
28286@@ -104,13 +124,47 @@ SECTIONS
28287 IRQENTRY_TEXT
28288 *(.fixup)
28289 *(.gnu.warning)
28290- /* End of text section */
28291- _etext = .;
28292 } :text = 0x9090
28293
28294- NOTES :text :note
28295+ . += __KERNEL_TEXT_OFFSET;
28296
28297- EXCEPTION_TABLE(16) :text = 0x9090
28298+#ifdef CONFIG_X86_32
28299+ . = ALIGN(PAGE_SIZE);
28300+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28301+
28302+#ifdef CONFIG_PAX_KERNEXEC
28303+ MODULES_EXEC_VADDR = .;
28304+ BYTE(0)
28305+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28306+ . = ALIGN(HPAGE_SIZE) - 1;
28307+ MODULES_EXEC_END = .;
28308+#endif
28309+
28310+ } :module
28311+#endif
28312+
28313+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28314+ /* End of text section */
28315+ BYTE(0)
28316+ _etext = . - __KERNEL_TEXT_OFFSET;
28317+ }
28318+
28319+#ifdef CONFIG_X86_32
28320+ . = ALIGN(PAGE_SIZE);
28321+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28322+ . = ALIGN(PAGE_SIZE);
28323+ *(.empty_zero_page)
28324+ *(.initial_pg_fixmap)
28325+ *(.initial_pg_pmd)
28326+ *(.initial_page_table)
28327+ *(.swapper_pg_dir)
28328+ } :rodata
28329+#endif
28330+
28331+ . = ALIGN(PAGE_SIZE);
28332+ NOTES :rodata :note
28333+
28334+ EXCEPTION_TABLE(16) :rodata
28335
28336 #if defined(CONFIG_DEBUG_RODATA)
28337 /* .text should occupy whole number of pages */
28338@@ -122,16 +176,20 @@ SECTIONS
28339
28340 /* Data */
28341 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28342+
28343+#ifdef CONFIG_PAX_KERNEXEC
28344+ . = ALIGN(HPAGE_SIZE);
28345+#else
28346+ . = ALIGN(PAGE_SIZE);
28347+#endif
28348+
28349 /* Start of data section */
28350 _sdata = .;
28351
28352 /* init_task */
28353 INIT_TASK_DATA(THREAD_SIZE)
28354
28355-#ifdef CONFIG_X86_32
28356- /* 32 bit has nosave before _edata */
28357 NOSAVE_DATA
28358-#endif
28359
28360 PAGE_ALIGNED_DATA(PAGE_SIZE)
28361
28362@@ -174,12 +232,19 @@ SECTIONS
28363 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28364
28365 /* Init code and data - will be freed after init */
28366- . = ALIGN(PAGE_SIZE);
28367 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28368+ BYTE(0)
28369+
28370+#ifdef CONFIG_PAX_KERNEXEC
28371+ . = ALIGN(HPAGE_SIZE);
28372+#else
28373+ . = ALIGN(PAGE_SIZE);
28374+#endif
28375+
28376 __init_begin = .; /* paired with __init_end */
28377- }
28378+ } :init.begin
28379
28380-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28381+#ifdef CONFIG_SMP
28382 /*
28383 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28384 * output PHDR, so the next output section - .init.text - should
28385@@ -190,12 +255,27 @@ SECTIONS
28386 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28387 #endif
28388
28389- INIT_TEXT_SECTION(PAGE_SIZE)
28390-#ifdef CONFIG_X86_64
28391- :init
28392-#endif
28393+ . = ALIGN(PAGE_SIZE);
28394+ init_begin = .;
28395+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28396+ VMLINUX_SYMBOL(_sinittext) = .;
28397+ INIT_TEXT
28398+ . = ALIGN(PAGE_SIZE);
28399+ } :text.init
28400
28401- INIT_DATA_SECTION(16)
28402+ /*
28403+	 * .exit.text is discarded at runtime, not link time, to deal with
28404+ * references from .altinstructions and .eh_frame
28405+ */
28406+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28407+ EXIT_TEXT
28408+ VMLINUX_SYMBOL(_einittext) = .;
28409+ . = ALIGN(16);
28410+ } :text.exit
28411+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28412+
28413+ . = ALIGN(PAGE_SIZE);
28414+ INIT_DATA_SECTION(16) :init
28415
28416 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28417 __x86_cpu_dev_start = .;
28418@@ -266,19 +346,12 @@ SECTIONS
28419 }
28420
28421 . = ALIGN(8);
28422- /*
28423- * .exit.text is discard at runtime, not link time, to deal with
28424- * references from .altinstructions and .eh_frame
28425- */
28426- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28427- EXIT_TEXT
28428- }
28429
28430 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28431 EXIT_DATA
28432 }
28433
28434-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28435+#ifndef CONFIG_SMP
28436 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28437 #endif
28438
28439@@ -297,16 +370,10 @@ SECTIONS
28440 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28441 __smp_locks = .;
28442 *(.smp_locks)
28443- . = ALIGN(PAGE_SIZE);
28444 __smp_locks_end = .;
28445+ . = ALIGN(PAGE_SIZE);
28446 }
28447
28448-#ifdef CONFIG_X86_64
28449- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28450- NOSAVE_DATA
28451- }
28452-#endif
28453-
28454 /* BSS */
28455 . = ALIGN(PAGE_SIZE);
28456 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28457@@ -322,6 +389,7 @@ SECTIONS
28458 __brk_base = .;
28459 . += 64 * 1024; /* 64k alignment slop space */
28460 *(.brk_reservation) /* areas brk users have reserved */
28461+ . = ALIGN(HPAGE_SIZE);
28462 __brk_limit = .;
28463 }
28464
28465@@ -348,13 +416,12 @@ SECTIONS
28466 * for the boot processor.
28467 */
28468 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28469-INIT_PER_CPU(gdt_page);
28470 INIT_PER_CPU(irq_stack_union);
28471
28472 /*
28473 * Build-time check on the image size:
28474 */
28475-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28476+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28477 "kernel image bigger than KERNEL_IMAGE_SIZE");
28478
28479 #ifdef CONFIG_SMP
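For reference, the numeric FLAGS() values in the reworked PHDRS block above are the standard ELF p_flags bits:

/*
 * PF_X = 0x1 (execute), PF_W = 0x2 (write), PF_R = 0x4 (read), so:
 *   FLAGS(4) = R__   FLAGS(5) = R_E   FLAGS(6) = RW_   FLAGS(7) = RWE
 */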
28480diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28481index 2dcc6ff..082dc7a 100644
28482--- a/arch/x86/kernel/vsyscall_64.c
28483+++ b/arch/x86/kernel/vsyscall_64.c
28484@@ -38,15 +38,13 @@
28485 #define CREATE_TRACE_POINTS
28486 #include "vsyscall_trace.h"
28487
28488-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28489+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28490
28491 static int __init vsyscall_setup(char *str)
28492 {
28493 if (str) {
28494 if (!strcmp("emulate", str))
28495 vsyscall_mode = EMULATE;
28496- else if (!strcmp("native", str))
28497- vsyscall_mode = NATIVE;
28498 else if (!strcmp("none", str))
28499 vsyscall_mode = NONE;
28500 else
28501@@ -264,8 +262,7 @@ do_ret:
28502 return true;
28503
28504 sigsegv:
28505- force_sig(SIGSEGV, current);
28506- return true;
28507+ do_group_exit(SIGKILL);
28508 }
28509
28510 /*
28511@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28512 static struct vm_area_struct gate_vma = {
28513 .vm_start = VSYSCALL_ADDR,
28514 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28515- .vm_page_prot = PAGE_READONLY_EXEC,
28516- .vm_flags = VM_READ | VM_EXEC,
28517+ .vm_page_prot = PAGE_READONLY,
28518+ .vm_flags = VM_READ,
28519 .vm_ops = &gate_vma_ops,
28520 };
28521
28522@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28523 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28524
28525 if (vsyscall_mode != NONE)
28526- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28527- vsyscall_mode == NATIVE
28528- ? PAGE_KERNEL_VSYSCALL
28529- : PAGE_KERNEL_VVAR);
28530+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28531
28532 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28533 (unsigned long)VSYSCALL_ADDR);
28534diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28535index 37d8fa4..66e319a 100644
28536--- a/arch/x86/kernel/x8664_ksyms_64.c
28537+++ b/arch/x86/kernel/x8664_ksyms_64.c
28538@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28539 EXPORT_SYMBOL(copy_user_generic_unrolled);
28540 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28541 EXPORT_SYMBOL(__copy_user_nocache);
28542-EXPORT_SYMBOL(_copy_from_user);
28543-EXPORT_SYMBOL(_copy_to_user);
28544
28545 EXPORT_SYMBOL(copy_page);
28546 EXPORT_SYMBOL(clear_page);
28547@@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28548 EXPORT_SYMBOL(___preempt_schedule_context);
28549 #endif
28550 #endif
28551+
28552+#ifdef CONFIG_PAX_PER_CPU_PGD
28553+EXPORT_SYMBOL(cpu_pgd);
28554+#endif
28555diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28556index 234b072..b7ab191 100644
28557--- a/arch/x86/kernel/x86_init.c
28558+++ b/arch/x86/kernel/x86_init.c
28559@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28560 static void default_nmi_init(void) { };
28561 static int default_i8042_detect(void) { return 1; };
28562
28563-struct x86_platform_ops x86_platform = {
28564+struct x86_platform_ops x86_platform __read_only = {
28565 .calibrate_tsc = native_calibrate_tsc,
28566 .get_wallclock = mach_get_cmos_time,
28567 .set_wallclock = mach_set_rtc_mmss,
28568@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28569 EXPORT_SYMBOL_GPL(x86_platform);
28570
28571 #if defined(CONFIG_PCI_MSI)
28572-struct x86_msi_ops x86_msi = {
28573+struct x86_msi_ops x86_msi __read_only = {
28574 .setup_msi_irqs = native_setup_msi_irqs,
28575 .compose_msi_msg = native_compose_msi_msg,
28576 .teardown_msi_irq = native_teardown_msi_irq,
28577@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28578 }
28579 #endif
28580
28581-struct x86_io_apic_ops x86_io_apic_ops = {
28582+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28583 .init = native_io_apic_init_mappings,
28584 .read = native_io_apic_read,
28585 .write = native_io_apic_write,
28586diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28587index cdc6cf9..e04f495 100644
28588--- a/arch/x86/kernel/xsave.c
28589+++ b/arch/x86/kernel/xsave.c
28590@@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28591
28592 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28593 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28594- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28595+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28596
28597 if (!use_xsave())
28598 return err;
28599
28600- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28601+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28602
28603 /*
28604 * Read the xstate_bv which we copied (directly from the cpu or
28605 * from the state in task struct) to the user buffers.
28606 */
28607- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28608+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28609
28610 /*
28611 * For legacy compatible, we always set FP/SSE bits in the bit
28612@@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28613 */
28614 xstate_bv |= XSTATE_FPSSE;
28615
28616- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28617+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28618
28619 return err;
28620 }
28621@@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28622 {
28623 int err;
28624
28625+ buf = (struct xsave_struct __user *)____m(buf);
28626 if (use_xsave())
28627 err = xsave_user(buf);
28628 else if (use_fxsr())
28629@@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28630 */
28631 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28632 {
28633+ buf = (void __user *)____m(buf);
28634 if (use_xsave()) {
28635 if ((unsigned long)buf % 64 || fx_only) {
28636 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28637diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28638index 8a80737..bac4961 100644
28639--- a/arch/x86/kvm/cpuid.c
28640+++ b/arch/x86/kvm/cpuid.c
28641@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28642 struct kvm_cpuid2 *cpuid,
28643 struct kvm_cpuid_entry2 __user *entries)
28644 {
28645- int r;
28646+ int r, i;
28647
28648 r = -E2BIG;
28649 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28650 goto out;
28651 r = -EFAULT;
28652- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28653- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28654+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28655 goto out;
28656+ for (i = 0; i < cpuid->nent; ++i) {
28657+ struct kvm_cpuid_entry2 cpuid_entry;
28658+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28659+ goto out;
28660+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28661+ }
28662 vcpu->arch.cpuid_nent = cpuid->nent;
28663 kvm_apic_set_version(vcpu);
28664 kvm_x86_ops->cpuid_update(vcpu);
28665@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28666 struct kvm_cpuid2 *cpuid,
28667 struct kvm_cpuid_entry2 __user *entries)
28668 {
28669- int r;
28670+ int r, i;
28671
28672 r = -E2BIG;
28673 if (cpuid->nent < vcpu->arch.cpuid_nent)
28674 goto out;
28675 r = -EFAULT;
28676- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28677- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28678+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28679 goto out;
28680+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28681+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28682+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28683+ goto out;
28684+ }
28685 return 0;
28686
28687 out:
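Both hunks above replace a single bulk copy whose length is derived from a user-supplied count with an access_ok() check on the whole range followed by fixed-size per-entry copies, so no individual copy length is attacker-derived. A rough sketch of the pattern (helper name invented; the caller is assumed to have already bounded nent, as the KVM_MAX_CPUID_ENTRIES test does above):

static int copy_entries_in(struct kvm_cpuid_entry2 *dst,
			   const struct kvm_cpuid_entry2 __user *src,
			   unsigned int nent)
{
	unsigned int i;

	if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
		return -EFAULT;
	for (i = 0; i < nent; i++)
		if (__copy_from_user(&dst[i], &src[i], sizeof(dst[i])))
			return -EFAULT;	/* nonzero return = bytes left uncopied */
	return 0;
}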
28688diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28689index 106c015..2db7161 100644
28690--- a/arch/x86/kvm/emulate.c
28691+++ b/arch/x86/kvm/emulate.c
28692@@ -3572,7 +3572,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28693 int cr = ctxt->modrm_reg;
28694 u64 efer = 0;
28695
28696- static u64 cr_reserved_bits[] = {
28697+ static const u64 cr_reserved_bits[] = {
28698 0xffffffff00000000ULL,
28699 0, 0, 0, /* CR3 checked later */
28700 CR4_RESERVED_BITS,
28701diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28702index 4ee827d..a14eff9 100644
28703--- a/arch/x86/kvm/lapic.c
28704+++ b/arch/x86/kvm/lapic.c
28705@@ -56,7 +56,7 @@
28706 #define APIC_BUS_CYCLE_NS 1
28707
28708 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28709-#define apic_debug(fmt, arg...)
28710+#define apic_debug(fmt, arg...) do {} while (0)
28711
28712 #define APIC_LVT_NUM 6
28713 /* 14 is the version for Xeon and Pentium 8.4.8*/
28714diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28715index fd49c86..77e1aa0 100644
28716--- a/arch/x86/kvm/paging_tmpl.h
28717+++ b/arch/x86/kvm/paging_tmpl.h
28718@@ -343,7 +343,7 @@ retry_walk:
28719 if (unlikely(kvm_is_error_hva(host_addr)))
28720 goto error;
28721
28722- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28723+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28724 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28725 goto error;
28726 walker->ptep_user[walker->level - 1] = ptep_user;
28727diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28728index cc618c8..3f72f76 100644
28729--- a/arch/x86/kvm/svm.c
28730+++ b/arch/x86/kvm/svm.c
28731@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28732 int cpu = raw_smp_processor_id();
28733
28734 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28735+
28736+ pax_open_kernel();
28737 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28738+ pax_close_kernel();
28739+
28740 load_TR_desc();
28741 }
28742
28743@@ -3964,6 +3968,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28744 #endif
28745 #endif
28746
28747+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28748+ __set_fs(current_thread_info()->addr_limit);
28749+#endif
28750+
28751 reload_tss(vcpu);
28752
28753 local_irq_disable();
28754diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28755index a60bd3a..748e856 100644
28756--- a/arch/x86/kvm/vmx.c
28757+++ b/arch/x86/kvm/vmx.c
28758@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28759 #endif
28760 }
28761
28762-static void vmcs_clear_bits(unsigned long field, u32 mask)
28763+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28764 {
28765 vmcs_writel(field, vmcs_readl(field) & ~mask);
28766 }
28767
28768-static void vmcs_set_bits(unsigned long field, u32 mask)
28769+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28770 {
28771 vmcs_writel(field, vmcs_readl(field) | mask);
28772 }
28773@@ -1705,7 +1705,11 @@ static void reload_tss(void)
28774 struct desc_struct *descs;
28775
28776 descs = (void *)gdt->address;
28777+
28778+ pax_open_kernel();
28779 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28780+ pax_close_kernel();
28781+
28782 load_TR_desc();
28783 }
28784
28785@@ -1941,6 +1945,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28786 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28787 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28788
28789+#ifdef CONFIG_PAX_PER_CPU_PGD
28790+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28791+#endif
28792+
28793 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28794 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28795 vmx->loaded_vmcs->cpu = cpu;
28796@@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28797 * reads and returns guest's timestamp counter "register"
28798 * guest_tsc = host_tsc + tsc_offset -- 21.3
28799 */
28800-static u64 guest_read_tsc(void)
28801+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28802 {
28803 u64 host_tsc, tsc_offset;
28804
28805@@ -4466,7 +4474,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28806 unsigned long cr4;
28807
28808 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28809+
28810+#ifndef CONFIG_PAX_PER_CPU_PGD
28811 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28812+#endif
28813
28814 /* Save the most likely value for this task's CR4 in the VMCS. */
28815 cr4 = cr4_read_shadow();
28816@@ -4493,7 +4504,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28817 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28818 vmx->host_idt_base = dt.address;
28819
28820- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28821+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28822
28823 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28824 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28825@@ -6104,11 +6115,17 @@ static __init int hardware_setup(void)
28826 * page upon invalidation. No need to do anything if not
28827 * using the APIC_ACCESS_ADDR VMCS field.
28828 */
28829- if (!flexpriority_enabled)
28830- kvm_x86_ops->set_apic_access_page_addr = NULL;
28831+ if (!flexpriority_enabled) {
28832+ pax_open_kernel();
28833+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28834+ pax_close_kernel();
28835+ }
28836
28837- if (!cpu_has_vmx_tpr_shadow())
28838- kvm_x86_ops->update_cr8_intercept = NULL;
28839+ if (!cpu_has_vmx_tpr_shadow()) {
28840+ pax_open_kernel();
28841+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28842+ pax_close_kernel();
28843+ }
28844
28845 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28846 kvm_disable_largepages();
28847@@ -6119,14 +6136,16 @@ static __init int hardware_setup(void)
28848 if (!cpu_has_vmx_apicv())
28849 enable_apicv = 0;
28850
28851+ pax_open_kernel();
28852 if (enable_apicv)
28853- kvm_x86_ops->update_cr8_intercept = NULL;
28854+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28855 else {
28856- kvm_x86_ops->hwapic_irr_update = NULL;
28857- kvm_x86_ops->hwapic_isr_update = NULL;
28858- kvm_x86_ops->deliver_posted_interrupt = NULL;
28859- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28860+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28861+ *(void **)&kvm_x86_ops->hwapic_isr_update = NULL;
28862+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28863+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28864 }
28865+ pax_close_kernel();
28866
28867 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
28868 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
28869@@ -6179,10 +6198,12 @@ static __init int hardware_setup(void)
28870 enable_pml = 0;
28871
28872 if (!enable_pml) {
28873- kvm_x86_ops->slot_enable_log_dirty = NULL;
28874- kvm_x86_ops->slot_disable_log_dirty = NULL;
28875- kvm_x86_ops->flush_log_dirty = NULL;
28876- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28877+ pax_open_kernel();
28878+ *(void **)&kvm_x86_ops->slot_enable_log_dirty = NULL;
28879+ *(void **)&kvm_x86_ops->slot_disable_log_dirty = NULL;
28880+ *(void **)&kvm_x86_ops->flush_log_dirty = NULL;
28881+ *(void **)&kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28882+ pax_close_kernel();
28883 }
28884
28885 return alloc_kvm_area();
28886@@ -8227,6 +8248,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28887 "jmp 2f \n\t"
28888 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28889 "2: "
28890+
28891+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28892+ "ljmp %[cs],$3f\n\t"
28893+ "3: "
28894+#endif
28895+
28896 /* Save guest registers, load host registers, keep flags */
28897 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28898 "pop %0 \n\t"
28899@@ -8279,6 +8306,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28900 #endif
28901 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28902 [wordsize]"i"(sizeof(ulong))
28903+
28904+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28905+ ,[cs]"i"(__KERNEL_CS)
28906+#endif
28907+
28908 : "cc", "memory"
28909 #ifdef CONFIG_X86_64
28910 , "rax", "rbx", "rdi", "rsi"
28911@@ -8292,7 +8324,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28912 if (debugctlmsr)
28913 update_debugctlmsr(debugctlmsr);
28914
28915-#ifndef CONFIG_X86_64
28916+#ifdef CONFIG_X86_32
28917 /*
28918 * The sysexit path does not restore ds/es, so we must set them to
28919 * a reasonable value ourselves.
28920@@ -8301,8 +8333,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28921 * may be executed in interrupt context, which saves and restore segments
28922 * around it, nullifying its effect.
28923 */
28924- loadsegment(ds, __USER_DS);
28925- loadsegment(es, __USER_DS);
28926+ loadsegment(ds, __KERNEL_DS);
28927+ loadsegment(es, __KERNEL_DS);
28928+ loadsegment(ss, __KERNEL_DS);
28929+
28930+#ifdef CONFIG_PAX_KERNEXEC
28931+ loadsegment(fs, __KERNEL_PERCPU);
28932+#endif
28933+
28934+#ifdef CONFIG_PAX_MEMORY_UDEREF
28935+ __set_fs(current_thread_info()->addr_limit);
28936+#endif
28937+
28938 #endif
28939
28940 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
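Several hunks in this file write to kvm_x86_ops, which the rest of this patch places in read-only memory; hence every store is bracketed by pax_open_kernel()/pax_close_kernel() and goes through a *(void **)& cast to strip the implied const. Distilled into a sketch (helper name invented):

static void disable_handler(struct kvm_x86_ops *ops)
{
	pax_open_kernel();	/* temporarily allow writes to r/o data */
	*(void **)&ops->update_cr8_intercept = NULL;
	pax_close_kernel();	/* restore write protection */
}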
28941diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28942index e222ba5..6f0f2de 100644
28943--- a/arch/x86/kvm/x86.c
28944+++ b/arch/x86/kvm/x86.c
28945@@ -1897,8 +1897,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28946 {
28947 struct kvm *kvm = vcpu->kvm;
28948 int lm = is_long_mode(vcpu);
28949- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28950- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28951+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28952+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28953 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28954 : kvm->arch.xen_hvm_config.blob_size_32;
28955 u32 page_num = data & ~PAGE_MASK;
28956@@ -2835,6 +2835,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28957 if (n < msr_list.nmsrs)
28958 goto out;
28959 r = -EFAULT;
28960+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28961+ goto out;
28962 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28963 num_msrs_to_save * sizeof(u32)))
28964 goto out;
28965@@ -5739,7 +5741,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28966 };
28967 #endif
28968
28969-int kvm_arch_init(void *opaque)
28970+int kvm_arch_init(const void *opaque)
28971 {
28972 int r;
28973 struct kvm_x86_ops *ops = opaque;
28974diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28975index ac4453d..1f43bf3 100644
28976--- a/arch/x86/lguest/boot.c
28977+++ b/arch/x86/lguest/boot.c
28978@@ -1340,9 +1340,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28979 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28980 * Launcher to reboot us.
28981 */
28982-static void lguest_restart(char *reason)
28983+static __noreturn void lguest_restart(char *reason)
28984 {
28985 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28986+ BUG();
28987 }
28988
28989 /*G:050
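The lguest change is a small contract fix: a restart hypercall should never return, so the handler is marked __noreturn and backstopped with BUG(). The shape of the pattern (request_shutdown() is a made-up stand-in):

static __noreturn void example_restart(char *reason)
{
	request_shutdown(reason);	/* expected not to return */
	BUG();				/* keep the __noreturn promise honest */
}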
28990diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28991index 00933d5..3a64af9 100644
28992--- a/arch/x86/lib/atomic64_386_32.S
28993+++ b/arch/x86/lib/atomic64_386_32.S
28994@@ -48,6 +48,10 @@ BEGIN(read)
28995 movl (v), %eax
28996 movl 4(v), %edx
28997 RET_ENDP
28998+BEGIN(read_unchecked)
28999+ movl (v), %eax
29000+ movl 4(v), %edx
29001+RET_ENDP
29002 #undef v
29003
29004 #define v %esi
29005@@ -55,6 +59,10 @@ BEGIN(set)
29006 movl %ebx, (v)
29007 movl %ecx, 4(v)
29008 RET_ENDP
29009+BEGIN(set_unchecked)
29010+ movl %ebx, (v)
29011+ movl %ecx, 4(v)
29012+RET_ENDP
29013 #undef v
29014
29015 #define v %esi
29016@@ -70,6 +78,20 @@ RET_ENDP
29017 BEGIN(add)
29018 addl %eax, (v)
29019 adcl %edx, 4(v)
29020+
29021+#ifdef CONFIG_PAX_REFCOUNT
29022+ jno 0f
29023+ subl %eax, (v)
29024+ sbbl %edx, 4(v)
29025+ int $4
29026+0:
29027+ _ASM_EXTABLE(0b, 0b)
29028+#endif
29029+
29030+RET_ENDP
29031+BEGIN(add_unchecked)
29032+ addl %eax, (v)
29033+ adcl %edx, 4(v)
29034 RET_ENDP
29035 #undef v
29036
29037@@ -77,6 +99,24 @@ RET_ENDP
29038 BEGIN(add_return)
29039 addl (v), %eax
29040 adcl 4(v), %edx
29041+
29042+#ifdef CONFIG_PAX_REFCOUNT
29043+ into
29044+1234:
29045+ _ASM_EXTABLE(1234b, 2f)
29046+#endif
29047+
29048+ movl %eax, (v)
29049+ movl %edx, 4(v)
29050+
29051+#ifdef CONFIG_PAX_REFCOUNT
29052+2:
29053+#endif
29054+
29055+RET_ENDP
29056+BEGIN(add_return_unchecked)
29057+ addl (v), %eax
29058+ adcl 4(v), %edx
29059 movl %eax, (v)
29060 movl %edx, 4(v)
29061 RET_ENDP
29062@@ -86,6 +126,20 @@ RET_ENDP
29063 BEGIN(sub)
29064 subl %eax, (v)
29065 sbbl %edx, 4(v)
29066+
29067+#ifdef CONFIG_PAX_REFCOUNT
29068+ jno 0f
29069+ addl %eax, (v)
29070+ adcl %edx, 4(v)
29071+ int $4
29072+0:
29073+ _ASM_EXTABLE(0b, 0b)
29074+#endif
29075+
29076+RET_ENDP
29077+BEGIN(sub_unchecked)
29078+ subl %eax, (v)
29079+ sbbl %edx, 4(v)
29080 RET_ENDP
29081 #undef v
29082
29083@@ -96,6 +150,27 @@ BEGIN(sub_return)
29084 sbbl $0, %edx
29085 addl (v), %eax
29086 adcl 4(v), %edx
29087+
29088+#ifdef CONFIG_PAX_REFCOUNT
29089+ into
29090+1234:
29091+ _ASM_EXTABLE(1234b, 2f)
29092+#endif
29093+
29094+ movl %eax, (v)
29095+ movl %edx, 4(v)
29096+
29097+#ifdef CONFIG_PAX_REFCOUNT
29098+2:
29099+#endif
29100+
29101+RET_ENDP
29102+BEGIN(sub_return_unchecked)
29103+ negl %edx
29104+ negl %eax
29105+ sbbl $0, %edx
29106+ addl (v), %eax
29107+ adcl 4(v), %edx
29108 movl %eax, (v)
29109 movl %edx, 4(v)
29110 RET_ENDP
29111@@ -105,6 +180,20 @@ RET_ENDP
29112 BEGIN(inc)
29113 addl $1, (v)
29114 adcl $0, 4(v)
29115+
29116+#ifdef CONFIG_PAX_REFCOUNT
29117+ jno 0f
29118+ subl $1, (v)
29119+ sbbl $0, 4(v)
29120+ int $4
29121+0:
29122+ _ASM_EXTABLE(0b, 0b)
29123+#endif
29124+
29125+RET_ENDP
29126+BEGIN(inc_unchecked)
29127+ addl $1, (v)
29128+ adcl $0, 4(v)
29129 RET_ENDP
29130 #undef v
29131
29132@@ -114,6 +203,26 @@ BEGIN(inc_return)
29133 movl 4(v), %edx
29134 addl $1, %eax
29135 adcl $0, %edx
29136+
29137+#ifdef CONFIG_PAX_REFCOUNT
29138+ into
29139+1234:
29140+ _ASM_EXTABLE(1234b, 2f)
29141+#endif
29142+
29143+ movl %eax, (v)
29144+ movl %edx, 4(v)
29145+
29146+#ifdef CONFIG_PAX_REFCOUNT
29147+2:
29148+#endif
29149+
29150+RET_ENDP
29151+BEGIN(inc_return_unchecked)
29152+ movl (v), %eax
29153+ movl 4(v), %edx
29154+ addl $1, %eax
29155+ adcl $0, %edx
29156 movl %eax, (v)
29157 movl %edx, 4(v)
29158 RET_ENDP
29159@@ -123,6 +232,20 @@ RET_ENDP
29160 BEGIN(dec)
29161 subl $1, (v)
29162 sbbl $0, 4(v)
29163+
29164+#ifdef CONFIG_PAX_REFCOUNT
29165+ jno 0f
29166+ addl $1, (v)
29167+ adcl $0, 4(v)
29168+ int $4
29169+0:
29170+ _ASM_EXTABLE(0b, 0b)
29171+#endif
29172+
29173+RET_ENDP
29174+BEGIN(dec_unchecked)
29175+ subl $1, (v)
29176+ sbbl $0, 4(v)
29177 RET_ENDP
29178 #undef v
29179
29180@@ -132,6 +255,26 @@ BEGIN(dec_return)
29181 movl 4(v), %edx
29182 subl $1, %eax
29183 sbbl $0, %edx
29184+
29185+#ifdef CONFIG_PAX_REFCOUNT
29186+ into
29187+1234:
29188+ _ASM_EXTABLE(1234b, 2f)
29189+#endif
29190+
29191+ movl %eax, (v)
29192+ movl %edx, 4(v)
29193+
29194+#ifdef CONFIG_PAX_REFCOUNT
29195+2:
29196+#endif
29197+
29198+RET_ENDP
29199+BEGIN(dec_return_unchecked)
29200+ movl (v), %eax
29201+ movl 4(v), %edx
29202+ subl $1, %eax
29203+ sbbl $0, %edx
29204 movl %eax, (v)
29205 movl %edx, 4(v)
29206 RET_ENDP
29207@@ -143,6 +286,13 @@ BEGIN(add_unless)
29208 adcl %edx, %edi
29209 addl (v), %eax
29210 adcl 4(v), %edx
29211+
29212+#ifdef CONFIG_PAX_REFCOUNT
29213+ into
29214+1234:
29215+ _ASM_EXTABLE(1234b, 2f)
29216+#endif
29217+
29218 cmpl %eax, %ecx
29219 je 3f
29220 1:
29221@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29222 1:
29223 addl $1, %eax
29224 adcl $0, %edx
29225+
29226+#ifdef CONFIG_PAX_REFCOUNT
29227+ into
29228+1234:
29229+ _ASM_EXTABLE(1234b, 2f)
29230+#endif
29231+
29232 movl %eax, (v)
29233 movl %edx, 4(v)
29234 movl $1, %eax
29235@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29236 movl 4(v), %edx
29237 subl $1, %eax
29238 sbbl $0, %edx
29239+
29240+#ifdef CONFIG_PAX_REFCOUNT
29241+ into
29242+1234:
29243+ _ASM_EXTABLE(1234b, 1f)
29244+#endif
29245+
29246 js 1f
29247 movl %eax, (v)
29248 movl %edx, 4(v)
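Each checked op above pairs the 64-bit add/sub with an overflow trap: jno skips the recovery path when no overflow occurred, otherwise the operation is undone and int $4 (or into, which only exists in 32-bit mode) raises #OF, with an exception-table entry steering execution past the write-back. A rough C analogue of the semantics (sketch only; the real code is lock-free asm and traps rather than returning a status):

#include <stdbool.h>
#include <stdint.h>

static bool add64_checked(int64_t *v, int64_t delta)
{
	int64_t sum;

	if (__builtin_add_overflow(*v, delta, &sum))
		return false;		/* signed overflow: refuse the update */
	*v = sum;			/* write back only on the clean path */
	return true;
}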
29249diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29250index f5cc9eb..51fa319 100644
29251--- a/arch/x86/lib/atomic64_cx8_32.S
29252+++ b/arch/x86/lib/atomic64_cx8_32.S
29253@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29254 CFI_STARTPROC
29255
29256 read64 %ecx
29257+ pax_force_retaddr
29258 ret
29259 CFI_ENDPROC
29260 ENDPROC(atomic64_read_cx8)
29261
29262+ENTRY(atomic64_read_unchecked_cx8)
29263+ CFI_STARTPROC
29264+
29265+ read64 %ecx
29266+ pax_force_retaddr
29267+ ret
29268+ CFI_ENDPROC
29269+ENDPROC(atomic64_read_unchecked_cx8)
29270+
29271 ENTRY(atomic64_set_cx8)
29272 CFI_STARTPROC
29273
29274@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29275 cmpxchg8b (%esi)
29276 jne 1b
29277
29278+ pax_force_retaddr
29279 ret
29280 CFI_ENDPROC
29281 ENDPROC(atomic64_set_cx8)
29282
29283+ENTRY(atomic64_set_unchecked_cx8)
29284+ CFI_STARTPROC
29285+
29286+1:
29287+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29288+ * are atomic on 586 and newer */
29289+ cmpxchg8b (%esi)
29290+ jne 1b
29291+
29292+ pax_force_retaddr
29293+ ret
29294+ CFI_ENDPROC
29295+ENDPROC(atomic64_set_unchecked_cx8)
29296+
29297 ENTRY(atomic64_xchg_cx8)
29298 CFI_STARTPROC
29299
29300@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29301 cmpxchg8b (%esi)
29302 jne 1b
29303
29304+ pax_force_retaddr
29305 ret
29306 CFI_ENDPROC
29307 ENDPROC(atomic64_xchg_cx8)
29308
29309-.macro addsub_return func ins insc
29310-ENTRY(atomic64_\func\()_return_cx8)
29311+.macro addsub_return func ins insc unchecked=""
29312+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29313 CFI_STARTPROC
29314 SAVE ebp
29315 SAVE ebx
29316@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29317 movl %edx, %ecx
29318 \ins\()l %esi, %ebx
29319 \insc\()l %edi, %ecx
29320+
29321+.ifb \unchecked
29322+#ifdef CONFIG_PAX_REFCOUNT
29323+ into
29324+2:
29325+ _ASM_EXTABLE(2b, 3f)
29326+#endif
29327+.endif
29328+
29329 LOCK_PREFIX
29330 cmpxchg8b (%ebp)
29331 jne 1b
29332-
29333-10:
29334 movl %ebx, %eax
29335 movl %ecx, %edx
29336+
29337+.ifb \unchecked
29338+#ifdef CONFIG_PAX_REFCOUNT
29339+3:
29340+#endif
29341+.endif
29342+
29343 RESTORE edi
29344 RESTORE esi
29345 RESTORE ebx
29346 RESTORE ebp
29347+ pax_force_retaddr
29348 ret
29349 CFI_ENDPROC
29350-ENDPROC(atomic64_\func\()_return_cx8)
29351+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29352 .endm
29353
29354 addsub_return add add adc
29355 addsub_return sub sub sbb
29356+addsub_return add add adc _unchecked
29357+addsub_return sub sub sbb _unchecked
29358
29359-.macro incdec_return func ins insc
29360-ENTRY(atomic64_\func\()_return_cx8)
29361+.macro incdec_return func ins insc unchecked=""
29362+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29363 CFI_STARTPROC
29364 SAVE ebx
29365
29366@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29367 movl %edx, %ecx
29368 \ins\()l $1, %ebx
29369 \insc\()l $0, %ecx
29370+
29371+.ifb \unchecked
29372+#ifdef CONFIG_PAX_REFCOUNT
29373+ into
29374+2:
29375+ _ASM_EXTABLE(2b, 3f)
29376+#endif
29377+.endif
29378+
29379 LOCK_PREFIX
29380 cmpxchg8b (%esi)
29381 jne 1b
29382
29383-10:
29384 movl %ebx, %eax
29385 movl %ecx, %edx
29386+
29387+.ifb \unchecked
29388+#ifdef CONFIG_PAX_REFCOUNT
29389+3:
29390+#endif
29391+.endif
29392+
29393 RESTORE ebx
29394+ pax_force_retaddr
29395 ret
29396 CFI_ENDPROC
29397-ENDPROC(atomic64_\func\()_return_cx8)
29398+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29399 .endm
29400
29401 incdec_return inc add adc
29402 incdec_return dec sub sbb
29403+incdec_return inc add adc _unchecked
29404+incdec_return dec sub sbb _unchecked
29405
29406 ENTRY(atomic64_dec_if_positive_cx8)
29407 CFI_STARTPROC
29408@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29409 movl %edx, %ecx
29410 subl $1, %ebx
29411 sbb $0, %ecx
29412+
29413+#ifdef CONFIG_PAX_REFCOUNT
29414+ into
29415+1234:
29416+ _ASM_EXTABLE(1234b, 2f)
29417+#endif
29418+
29419 js 2f
29420 LOCK_PREFIX
29421 cmpxchg8b (%esi)
29422@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29423 movl %ebx, %eax
29424 movl %ecx, %edx
29425 RESTORE ebx
29426+ pax_force_retaddr
29427 ret
29428 CFI_ENDPROC
29429 ENDPROC(atomic64_dec_if_positive_cx8)
29430@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29431 movl %edx, %ecx
29432 addl %ebp, %ebx
29433 adcl %edi, %ecx
29434+
29435+#ifdef CONFIG_PAX_REFCOUNT
29436+ into
29437+1234:
29438+ _ASM_EXTABLE(1234b, 3f)
29439+#endif
29440+
29441 LOCK_PREFIX
29442 cmpxchg8b (%esi)
29443 jne 1b
29444@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29445 CFI_ADJUST_CFA_OFFSET -8
29446 RESTORE ebx
29447 RESTORE ebp
29448+ pax_force_retaddr
29449 ret
29450 4:
29451 cmpl %edx, 4(%esp)
29452@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29453 xorl %ecx, %ecx
29454 addl $1, %ebx
29455 adcl %edx, %ecx
29456+
29457+#ifdef CONFIG_PAX_REFCOUNT
29458+ into
29459+1234:
29460+ _ASM_EXTABLE(1234b, 3f)
29461+#endif
29462+
29463 LOCK_PREFIX
29464 cmpxchg8b (%esi)
29465 jne 1b
29466@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29467 movl $1, %eax
29468 3:
29469 RESTORE ebx
29470+ pax_force_retaddr
29471 ret
29472 CFI_ENDPROC
29473 ENDPROC(atomic64_inc_not_zero_cx8)
29474diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29475index e78b8eee..7e173a8 100644
29476--- a/arch/x86/lib/checksum_32.S
29477+++ b/arch/x86/lib/checksum_32.S
29478@@ -29,7 +29,8 @@
29479 #include <asm/dwarf2.h>
29480 #include <asm/errno.h>
29481 #include <asm/asm.h>
29482-
29483+#include <asm/segment.h>
29484+
29485 /*
29486 * computes a partial checksum, e.g. for TCP/UDP fragments
29487 */
29488@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29489
29490 #define ARGBASE 16
29491 #define FP 12
29492-
29493-ENTRY(csum_partial_copy_generic)
29494+
29495+ENTRY(csum_partial_copy_generic_to_user)
29496 CFI_STARTPROC
29497+
29498+#ifdef CONFIG_PAX_MEMORY_UDEREF
29499+ pushl_cfi %gs
29500+ popl_cfi %es
29501+ jmp csum_partial_copy_generic
29502+#endif
29503+
29504+ENTRY(csum_partial_copy_generic_from_user)
29505+
29506+#ifdef CONFIG_PAX_MEMORY_UDEREF
29507+ pushl_cfi %gs
29508+ popl_cfi %ds
29509+#endif
29510+
29511+ENTRY(csum_partial_copy_generic)
29512 subl $4,%esp
29513 CFI_ADJUST_CFA_OFFSET 4
29514 pushl_cfi %edi
29515@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29516 jmp 4f
29517 SRC(1: movw (%esi), %bx )
29518 addl $2, %esi
29519-DST( movw %bx, (%edi) )
29520+DST( movw %bx, %es:(%edi) )
29521 addl $2, %edi
29522 addw %bx, %ax
29523 adcl $0, %eax
29524@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29525 SRC(1: movl (%esi), %ebx )
29526 SRC( movl 4(%esi), %edx )
29527 adcl %ebx, %eax
29528-DST( movl %ebx, (%edi) )
29529+DST( movl %ebx, %es:(%edi) )
29530 adcl %edx, %eax
29531-DST( movl %edx, 4(%edi) )
29532+DST( movl %edx, %es:4(%edi) )
29533
29534 SRC( movl 8(%esi), %ebx )
29535 SRC( movl 12(%esi), %edx )
29536 adcl %ebx, %eax
29537-DST( movl %ebx, 8(%edi) )
29538+DST( movl %ebx, %es:8(%edi) )
29539 adcl %edx, %eax
29540-DST( movl %edx, 12(%edi) )
29541+DST( movl %edx, %es:12(%edi) )
29542
29543 SRC( movl 16(%esi), %ebx )
29544 SRC( movl 20(%esi), %edx )
29545 adcl %ebx, %eax
29546-DST( movl %ebx, 16(%edi) )
29547+DST( movl %ebx, %es:16(%edi) )
29548 adcl %edx, %eax
29549-DST( movl %edx, 20(%edi) )
29550+DST( movl %edx, %es:20(%edi) )
29551
29552 SRC( movl 24(%esi), %ebx )
29553 SRC( movl 28(%esi), %edx )
29554 adcl %ebx, %eax
29555-DST( movl %ebx, 24(%edi) )
29556+DST( movl %ebx, %es:24(%edi) )
29557 adcl %edx, %eax
29558-DST( movl %edx, 28(%edi) )
29559+DST( movl %edx, %es:28(%edi) )
29560
29561 lea 32(%esi), %esi
29562 lea 32(%edi), %edi
29563@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29564 shrl $2, %edx # This clears CF
29565 SRC(3: movl (%esi), %ebx )
29566 adcl %ebx, %eax
29567-DST( movl %ebx, (%edi) )
29568+DST( movl %ebx, %es:(%edi) )
29569 lea 4(%esi), %esi
29570 lea 4(%edi), %edi
29571 dec %edx
29572@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29573 jb 5f
29574 SRC( movw (%esi), %cx )
29575 leal 2(%esi), %esi
29576-DST( movw %cx, (%edi) )
29577+DST( movw %cx, %es:(%edi) )
29578 leal 2(%edi), %edi
29579 je 6f
29580 shll $16,%ecx
29581 SRC(5: movb (%esi), %cl )
29582-DST( movb %cl, (%edi) )
29583+DST( movb %cl, %es:(%edi) )
29584 6: addl %ecx, %eax
29585 adcl $0, %eax
29586 7:
29587@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29588
29589 6001:
29590 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29591- movl $-EFAULT, (%ebx)
29592+ movl $-EFAULT, %ss:(%ebx)
29593
29594 # zero the complete destination - computing the rest
29595 # is too much work
29596@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29597
29598 6002:
29599 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29600- movl $-EFAULT,(%ebx)
29601+ movl $-EFAULT,%ss:(%ebx)
29602 jmp 5000b
29603
29604 .previous
29605
29606+ pushl_cfi %ss
29607+ popl_cfi %ds
29608+ pushl_cfi %ss
29609+ popl_cfi %es
29610 popl_cfi %ebx
29611 CFI_RESTORE ebx
29612 popl_cfi %esi
29613@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29614 popl_cfi %ecx # equivalent to addl $4,%esp
29615 ret
29616 CFI_ENDPROC
29617-ENDPROC(csum_partial_copy_generic)
29618+ENDPROC(csum_partial_copy_generic_to_user)
29619
29620 #else
29621
29622 /* Version for PentiumII/PPro */
29623
29624 #define ROUND1(x) \
29625+ nop; nop; nop; \
29626 SRC(movl x(%esi), %ebx ) ; \
29627 addl %ebx, %eax ; \
29628- DST(movl %ebx, x(%edi) ) ;
29629+ DST(movl %ebx, %es:x(%edi)) ;
29630
29631 #define ROUND(x) \
29632+ nop; nop; nop; \
29633 SRC(movl x(%esi), %ebx ) ; \
29634 adcl %ebx, %eax ; \
29635- DST(movl %ebx, x(%edi) ) ;
29636+ DST(movl %ebx, %es:x(%edi)) ;
29637
29638 #define ARGBASE 12
29639-
29640-ENTRY(csum_partial_copy_generic)
29641+
29642+ENTRY(csum_partial_copy_generic_to_user)
29643 CFI_STARTPROC
29644+
29645+#ifdef CONFIG_PAX_MEMORY_UDEREF
29646+ pushl_cfi %gs
29647+ popl_cfi %es
29648+ jmp csum_partial_copy_generic
29649+#endif
29650+
29651+ENTRY(csum_partial_copy_generic_from_user)
29652+
29653+#ifdef CONFIG_PAX_MEMORY_UDEREF
29654+ pushl_cfi %gs
29655+ popl_cfi %ds
29656+#endif
29657+
29658+ENTRY(csum_partial_copy_generic)
29659 pushl_cfi %ebx
29660 CFI_REL_OFFSET ebx, 0
29661 pushl_cfi %edi
29662@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29663 subl %ebx, %edi
29664 lea -1(%esi),%edx
29665 andl $-32,%edx
29666- lea 3f(%ebx,%ebx), %ebx
29667+ lea 3f(%ebx,%ebx,2), %ebx
29668 testl %esi, %esi
29669 jmp *%ebx
29670 1: addl $64,%esi
29671@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29672 jb 5f
29673 SRC( movw (%esi), %dx )
29674 leal 2(%esi), %esi
29675-DST( movw %dx, (%edi) )
29676+DST( movw %dx, %es:(%edi) )
29677 leal 2(%edi), %edi
29678 je 6f
29679 shll $16,%edx
29680 5:
29681 SRC( movb (%esi), %dl )
29682-DST( movb %dl, (%edi) )
29683+DST( movb %dl, %es:(%edi) )
29684 6: addl %edx, %eax
29685 adcl $0, %eax
29686 7:
29687 .section .fixup, "ax"
29688 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29689- movl $-EFAULT, (%ebx)
29690+ movl $-EFAULT, %ss:(%ebx)
29691 # zero the complete destination (computing the rest is too much work)
29692 movl ARGBASE+8(%esp),%edi # dst
29693 movl ARGBASE+12(%esp),%ecx # len
29694@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29695 rep; stosb
29696 jmp 7b
29697 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29698- movl $-EFAULT, (%ebx)
29699+ movl $-EFAULT, %ss:(%ebx)
29700 jmp 7b
29701 .previous
29702
29703+#ifdef CONFIG_PAX_MEMORY_UDEREF
29704+ pushl_cfi %ss
29705+ popl_cfi %ds
29706+ pushl_cfi %ss
29707+ popl_cfi %es
29708+#endif
29709+
29710 popl_cfi %esi
29711 CFI_RESTORE esi
29712 popl_cfi %edi
29713@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29714 CFI_RESTORE ebx
29715 ret
29716 CFI_ENDPROC
29717-ENDPROC(csum_partial_copy_generic)
29718+ENDPROC(csum_partial_copy_generic_to_user)
29719
29720 #undef ROUND
29721 #undef ROUND1
29722diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29723index f2145cf..cea889d 100644
29724--- a/arch/x86/lib/clear_page_64.S
29725+++ b/arch/x86/lib/clear_page_64.S
29726@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29727 movl $4096/8,%ecx
29728 xorl %eax,%eax
29729 rep stosq
29730+ pax_force_retaddr
29731 ret
29732 CFI_ENDPROC
29733 ENDPROC(clear_page_c)
29734@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29735 movl $4096,%ecx
29736 xorl %eax,%eax
29737 rep stosb
29738+ pax_force_retaddr
29739 ret
29740 CFI_ENDPROC
29741 ENDPROC(clear_page_c_e)
29742@@ -43,6 +45,7 @@ ENTRY(clear_page)
29743 leaq 64(%rdi),%rdi
29744 jnz .Lloop
29745 nop
29746+ pax_force_retaddr
29747 ret
29748 CFI_ENDPROC
29749 .Lclear_page_end:
29750@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29751
29752 #include <asm/cpufeature.h>
29753
29754- .section .altinstr_replacement,"ax"
29755+ .section .altinstr_replacement,"a"
29756 1: .byte 0xeb /* jmp <disp8> */
29757 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29758 2: .byte 0xeb /* jmp <disp8> */
29759diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29760index 40a1725..5d12ac4 100644
29761--- a/arch/x86/lib/cmpxchg16b_emu.S
29762+++ b/arch/x86/lib/cmpxchg16b_emu.S
29763@@ -8,6 +8,7 @@
29764 #include <linux/linkage.h>
29765 #include <asm/dwarf2.h>
29766 #include <asm/percpu.h>
29767+#include <asm/alternative-asm.h>
29768
29769 .text
29770
29771@@ -46,12 +47,14 @@ CFI_STARTPROC
29772 CFI_REMEMBER_STATE
29773 popfq_cfi
29774 mov $1, %al
29775+ pax_force_retaddr
29776 ret
29777
29778 CFI_RESTORE_STATE
29779 .Lnot_same:
29780 popfq_cfi
29781 xor %al,%al
29782+ pax_force_retaddr
29783 ret
29784
29785 CFI_ENDPROC
29786diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29787index 176cca6..e0d658e 100644
29788--- a/arch/x86/lib/copy_page_64.S
29789+++ b/arch/x86/lib/copy_page_64.S
29790@@ -9,6 +9,7 @@ copy_page_rep:
29791 CFI_STARTPROC
29792 movl $4096/8, %ecx
29793 rep movsq
29794+ pax_force_retaddr
29795 ret
29796 CFI_ENDPROC
29797 ENDPROC(copy_page_rep)
29798@@ -24,8 +25,8 @@ ENTRY(copy_page)
29799 CFI_ADJUST_CFA_OFFSET 2*8
29800 movq %rbx, (%rsp)
29801 CFI_REL_OFFSET rbx, 0
29802- movq %r12, 1*8(%rsp)
29803- CFI_REL_OFFSET r12, 1*8
29804+ movq %r13, 1*8(%rsp)
29805+ CFI_REL_OFFSET r13, 1*8
29806
29807 movl $(4096/64)-5, %ecx
29808 .p2align 4
29809@@ -38,7 +39,7 @@ ENTRY(copy_page)
29810 movq 0x8*4(%rsi), %r9
29811 movq 0x8*5(%rsi), %r10
29812 movq 0x8*6(%rsi), %r11
29813- movq 0x8*7(%rsi), %r12
29814+ movq 0x8*7(%rsi), %r13
29815
29816 prefetcht0 5*64(%rsi)
29817
29818@@ -49,7 +50,7 @@ ENTRY(copy_page)
29819 movq %r9, 0x8*4(%rdi)
29820 movq %r10, 0x8*5(%rdi)
29821 movq %r11, 0x8*6(%rdi)
29822- movq %r12, 0x8*7(%rdi)
29823+ movq %r13, 0x8*7(%rdi)
29824
29825 leaq 64 (%rsi), %rsi
29826 leaq 64 (%rdi), %rdi
29827@@ -68,7 +69,7 @@ ENTRY(copy_page)
29828 movq 0x8*4(%rsi), %r9
29829 movq 0x8*5(%rsi), %r10
29830 movq 0x8*6(%rsi), %r11
29831- movq 0x8*7(%rsi), %r12
29832+ movq 0x8*7(%rsi), %r13
29833
29834 movq %rax, 0x8*0(%rdi)
29835 movq %rbx, 0x8*1(%rdi)
29836@@ -77,7 +78,7 @@ ENTRY(copy_page)
29837 movq %r9, 0x8*4(%rdi)
29838 movq %r10, 0x8*5(%rdi)
29839 movq %r11, 0x8*6(%rdi)
29840- movq %r12, 0x8*7(%rdi)
29841+ movq %r13, 0x8*7(%rdi)
29842
29843 leaq 64(%rdi), %rdi
29844 leaq 64(%rsi), %rsi
29845@@ -85,10 +86,11 @@ ENTRY(copy_page)
29846
29847 movq (%rsp), %rbx
29848 CFI_RESTORE rbx
29849- movq 1*8(%rsp), %r12
29850- CFI_RESTORE r12
29851+ movq 1*8(%rsp), %r13
29852+ CFI_RESTORE r13
29853 addq $2*8, %rsp
29854 CFI_ADJUST_CFA_OFFSET -2*8
29855+ pax_force_retaddr
29856 ret
29857 .Lcopy_page_end:
29858 CFI_ENDPROC
29859@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29860
29861 #include <asm/cpufeature.h>
29862
29863- .section .altinstr_replacement,"ax"
29864+ .section .altinstr_replacement,"a"
29865 1: .byte 0xeb /* jmp <disp8> */
29866 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29867 2:
29868diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29869index dee945d..a84067b 100644
29870--- a/arch/x86/lib/copy_user_64.S
29871+++ b/arch/x86/lib/copy_user_64.S
29872@@ -18,31 +18,7 @@
29873 #include <asm/alternative-asm.h>
29874 #include <asm/asm.h>
29875 #include <asm/smap.h>
29876-
29877-/*
29878- * By placing feature2 after feature1 in altinstructions section, we logically
29879- * implement:
29880- * If CPU has feature2, jmp to alt2 is used
29881- * else if CPU has feature1, jmp to alt1 is used
29882- * else jmp to orig is used.
29883- */
29884- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29885-0:
29886- .byte 0xe9 /* 32bit jump */
29887- .long \orig-1f /* by default jump to orig */
29888-1:
29889- .section .altinstr_replacement,"ax"
29890-2: .byte 0xe9 /* near jump with 32bit immediate */
29891- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29892-3: .byte 0xe9 /* near jump with 32bit immediate */
29893- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29894- .previous
29895-
29896- .section .altinstructions,"a"
29897- altinstruction_entry 0b,2b,\feature1,5,5
29898- altinstruction_entry 0b,3b,\feature2,5,5
29899- .previous
29900- .endm
29901+#include <asm/pgtable.h>
29902
29903 .macro ALIGN_DESTINATION
29904 #ifdef FIX_ALIGNMENT
29905@@ -70,52 +46,6 @@
29906 #endif
29907 .endm
29908
29909-/* Standard copy_to_user with segment limit checking */
29910-ENTRY(_copy_to_user)
29911- CFI_STARTPROC
29912- GET_THREAD_INFO(%rax)
29913- movq %rdi,%rcx
29914- addq %rdx,%rcx
29915- jc bad_to_user
29916- cmpq TI_addr_limit(%rax),%rcx
29917- ja bad_to_user
29918- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29919- copy_user_generic_unrolled,copy_user_generic_string, \
29920- copy_user_enhanced_fast_string
29921- CFI_ENDPROC
29922-ENDPROC(_copy_to_user)
29923-
29924-/* Standard copy_from_user with segment limit checking */
29925-ENTRY(_copy_from_user)
29926- CFI_STARTPROC
29927- GET_THREAD_INFO(%rax)
29928- movq %rsi,%rcx
29929- addq %rdx,%rcx
29930- jc bad_from_user
29931- cmpq TI_addr_limit(%rax),%rcx
29932- ja bad_from_user
29933- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29934- copy_user_generic_unrolled,copy_user_generic_string, \
29935- copy_user_enhanced_fast_string
29936- CFI_ENDPROC
29937-ENDPROC(_copy_from_user)
29938-
29939- .section .fixup,"ax"
29940- /* must zero dest */
29941-ENTRY(bad_from_user)
29942-bad_from_user:
29943- CFI_STARTPROC
29944- movl %edx,%ecx
29945- xorl %eax,%eax
29946- rep
29947- stosb
29948-bad_to_user:
29949- movl %edx,%eax
29950- ret
29951- CFI_ENDPROC
29952-ENDPROC(bad_from_user)
29953- .previous
29954-
29955 /*
29956 * copy_user_generic_unrolled - memory copy with exception handling.
29957 * This version is for CPUs like P4 that don't have efficient micro
29958@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29959 */
29960 ENTRY(copy_user_generic_unrolled)
29961 CFI_STARTPROC
29962+ ASM_PAX_OPEN_USERLAND
29963 ASM_STAC
29964 cmpl $8,%edx
29965 jb 20f /* less then 8 bytes, go to byte copy loop */
29966@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29967 jnz 21b
29968 23: xor %eax,%eax
29969 ASM_CLAC
29970+ ASM_PAX_CLOSE_USERLAND
29971+ pax_force_retaddr
29972 ret
29973
29974 .section .fixup,"ax"
29975@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29976 */
29977 ENTRY(copy_user_generic_string)
29978 CFI_STARTPROC
29979+ ASM_PAX_OPEN_USERLAND
29980 ASM_STAC
29981 cmpl $8,%edx
29982 jb 2f /* less than 8 bytes, go to byte copy loop */
29983@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29984 movsb
29985 xorl %eax,%eax
29986 ASM_CLAC
29987+ ASM_PAX_CLOSE_USERLAND
29988+ pax_force_retaddr
29989 ret
29990
29991 .section .fixup,"ax"
29992@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29993 */
29994 ENTRY(copy_user_enhanced_fast_string)
29995 CFI_STARTPROC
29996+ ASM_PAX_OPEN_USERLAND
29997 ASM_STAC
29998 movl %edx,%ecx
29999 1: rep
30000 movsb
30001 xorl %eax,%eax
30002 ASM_CLAC
30003+ ASM_PAX_CLOSE_USERLAND
30004+ pax_force_retaddr
30005 ret
30006
30007 .section .fixup,"ax"
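The deleted _copy_to_user/_copy_from_user stubs performed the range check in asm: add the length, take jc on wrap-around, then compare against TI_addr_limit. In C the same test reads roughly as below (sketch; mm_segment_t is assumed to expose its limit as .seg, as it does on x86):

static inline bool range_ok(const void __user *ptr, unsigned long size)
{
	unsigned long addr = (unsigned long)ptr;

	return addr + size >= addr &&	/* no wrap: the jc case */
	       addr + size <= current_thread_info()->addr_limit.seg;
}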
30008diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30009index 6a4f43c..c70fb52 100644
30010--- a/arch/x86/lib/copy_user_nocache_64.S
30011+++ b/arch/x86/lib/copy_user_nocache_64.S
30012@@ -8,6 +8,7 @@
30013
30014 #include <linux/linkage.h>
30015 #include <asm/dwarf2.h>
30016+#include <asm/alternative-asm.h>
30017
30018 #define FIX_ALIGNMENT 1
30019
30020@@ -16,6 +17,7 @@
30021 #include <asm/thread_info.h>
30022 #include <asm/asm.h>
30023 #include <asm/smap.h>
30024+#include <asm/pgtable.h>
30025
30026 .macro ALIGN_DESTINATION
30027 #ifdef FIX_ALIGNMENT
30028@@ -49,6 +51,16 @@
30029 */
30030 ENTRY(__copy_user_nocache)
30031 CFI_STARTPROC
30032+
30033+#ifdef CONFIG_PAX_MEMORY_UDEREF
30034+ mov pax_user_shadow_base,%rcx
30035+ cmp %rcx,%rsi
30036+ jae 1f
30037+ add %rcx,%rsi
30038+1:
30039+#endif
30040+
30041+ ASM_PAX_OPEN_USERLAND
30042 ASM_STAC
30043 cmpl $8,%edx
30044 jb 20f /* less then 8 bytes, go to byte copy loop */
30045@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30046 jnz 21b
30047 23: xorl %eax,%eax
30048 ASM_CLAC
30049+ ASM_PAX_CLOSE_USERLAND
30050 sfence
30051+ pax_force_retaddr
30052 ret
30053
30054 .section .fixup,"ax"
30055diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30056index 2419d5f..fe52d0e 100644
30057--- a/arch/x86/lib/csum-copy_64.S
30058+++ b/arch/x86/lib/csum-copy_64.S
30059@@ -9,6 +9,7 @@
30060 #include <asm/dwarf2.h>
30061 #include <asm/errno.h>
30062 #include <asm/asm.h>
30063+#include <asm/alternative-asm.h>
30064
30065 /*
30066 * Checksum copy with exception handling.
30067@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30068 CFI_ADJUST_CFA_OFFSET 7*8
30069 movq %rbx, 2*8(%rsp)
30070 CFI_REL_OFFSET rbx, 2*8
30071- movq %r12, 3*8(%rsp)
30072- CFI_REL_OFFSET r12, 3*8
30073+ movq %r15, 3*8(%rsp)
30074+ CFI_REL_OFFSET r15, 3*8
30075 movq %r14, 4*8(%rsp)
30076 CFI_REL_OFFSET r14, 4*8
30077 movq %r13, 5*8(%rsp)
30078@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30079 movl %edx, %ecx
30080
30081 xorl %r9d, %r9d
30082- movq %rcx, %r12
30083+ movq %rcx, %r15
30084
30085- shrq $6, %r12
30086+ shrq $6, %r15
30087 jz .Lhandle_tail /* < 64 */
30088
30089 clc
30090
30091 /* main loop. clear in 64 byte blocks */
30092 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30093- /* r11: temp3, rdx: temp4, r12 loopcnt */
30094+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30095 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30096 .p2align 4
30097 .Lloop:
30098@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30099 adcq %r14, %rax
30100 adcq %r13, %rax
30101
30102- decl %r12d
30103+ decl %r15d
30104
30105 dest
30106 movq %rbx, (%rsi)
30107@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30108 .Lende:
30109 movq 2*8(%rsp), %rbx
30110 CFI_RESTORE rbx
30111- movq 3*8(%rsp), %r12
30112- CFI_RESTORE r12
30113+ movq 3*8(%rsp), %r15
30114+ CFI_RESTORE r15
30115 movq 4*8(%rsp), %r14
30116 CFI_RESTORE r14
30117 movq 5*8(%rsp), %r13
30118@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30119 CFI_RESTORE rbp
30120 addq $7*8, %rsp
30121 CFI_ADJUST_CFA_OFFSET -7*8
30122+ pax_force_retaddr
30123 ret
30124 CFI_RESTORE_STATE
30125
30126diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30127index 1318f75..44c30fd 100644
30128--- a/arch/x86/lib/csum-wrappers_64.c
30129+++ b/arch/x86/lib/csum-wrappers_64.c
30130@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30131 len -= 2;
30132 }
30133 }
30134+ pax_open_userland();
30135 stac();
30136- isum = csum_partial_copy_generic((__force const void *)src,
30137+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30138 dst, len, isum, errp, NULL);
30139 clac();
30140+ pax_close_userland();
30141 if (unlikely(*errp))
30142 goto out_err;
30143
30144@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30145 }
30146
30147 *errp = 0;
30148+ pax_open_userland();
30149 stac();
30150- ret = csum_partial_copy_generic(src, (void __force *)dst,
30151+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30152 len, isum, NULL, errp);
30153 clac();
30154+ pax_close_userland();
30155 return ret;
30156 }
30157 EXPORT_SYMBOL(csum_partial_copy_to_user);
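Both wrappers above use the access bracket this patch applies wherever kernel code streams data to or from user memory: the UDEREF window and the SMAP AC flag are opened only around the transfer, and ____m() rebases the pointer into the shadow mapping. The ordering, excerpted from the hunk with comments added:

	pax_open_userland();		/* PaX UDEREF: open the user window */
	stac();				/* SMAP: permit user-space accesses */
	ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
					len, isum, NULL, errp);
	clac();				/* SMAP: forbid user accesses again */
	pax_close_userland();		/* UDEREF: close the window */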
30158diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30159index a451235..a74bfa3 100644
30160--- a/arch/x86/lib/getuser.S
30161+++ b/arch/x86/lib/getuser.S
30162@@ -33,17 +33,40 @@
30163 #include <asm/thread_info.h>
30164 #include <asm/asm.h>
30165 #include <asm/smap.h>
30166+#include <asm/segment.h>
30167+#include <asm/pgtable.h>
30168+#include <asm/alternative-asm.h>
30169+
30170+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30171+#define __copyuser_seg gs;
30172+#else
30173+#define __copyuser_seg
30174+#endif
30175
30176 .text
30177 ENTRY(__get_user_1)
30178 CFI_STARTPROC
30179+
30180+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30181 GET_THREAD_INFO(%_ASM_DX)
30182 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30183 jae bad_get_user
30184+
30185+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30186+ mov pax_user_shadow_base,%_ASM_DX
30187+ cmp %_ASM_DX,%_ASM_AX
30188+ jae 1234f
30189+ add %_ASM_DX,%_ASM_AX
30190+1234:
30191+#endif
30192+
30193+#endif
30194+
30195 ASM_STAC
30196-1: movzbl (%_ASM_AX),%edx
30197+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30198 xor %eax,%eax
30199 ASM_CLAC
30200+ pax_force_retaddr
30201 ret
30202 CFI_ENDPROC
30203 ENDPROC(__get_user_1)
30204@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30205 ENTRY(__get_user_2)
30206 CFI_STARTPROC
30207 add $1,%_ASM_AX
30208+
30209+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30210 jc bad_get_user
30211 GET_THREAD_INFO(%_ASM_DX)
30212 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30213 jae bad_get_user
30214+
30215+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30216+ mov pax_user_shadow_base,%_ASM_DX
30217+ cmp %_ASM_DX,%_ASM_AX
30218+ jae 1234f
30219+ add %_ASM_DX,%_ASM_AX
30220+1234:
30221+#endif
30222+
30223+#endif
30224+
30225 ASM_STAC
30226-2: movzwl -1(%_ASM_AX),%edx
30227+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30228 xor %eax,%eax
30229 ASM_CLAC
30230+ pax_force_retaddr
30231 ret
30232 CFI_ENDPROC
30233 ENDPROC(__get_user_2)
30234@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30235 ENTRY(__get_user_4)
30236 CFI_STARTPROC
30237 add $3,%_ASM_AX
30238+
30239+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30240 jc bad_get_user
30241 GET_THREAD_INFO(%_ASM_DX)
30242 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30243 jae bad_get_user
30244+
30245+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30246+ mov pax_user_shadow_base,%_ASM_DX
30247+ cmp %_ASM_DX,%_ASM_AX
30248+ jae 1234f
30249+ add %_ASM_DX,%_ASM_AX
30250+1234:
30251+#endif
30252+
30253+#endif
30254+
30255 ASM_STAC
30256-3: movl -3(%_ASM_AX),%edx
30257+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30258 xor %eax,%eax
30259 ASM_CLAC
30260+ pax_force_retaddr
30261 ret
30262 CFI_ENDPROC
30263 ENDPROC(__get_user_4)
30264@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30265 GET_THREAD_INFO(%_ASM_DX)
30266 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30267 jae bad_get_user
30268+
30269+#ifdef CONFIG_PAX_MEMORY_UDEREF
30270+ mov pax_user_shadow_base,%_ASM_DX
30271+ cmp %_ASM_DX,%_ASM_AX
30272+ jae 1234f
30273+ add %_ASM_DX,%_ASM_AX
30274+1234:
30275+#endif
30276+
30277 ASM_STAC
30278 4: movq -7(%_ASM_AX),%rdx
30279 xor %eax,%eax
30280 ASM_CLAC
30281+ pax_force_retaddr
30282 ret
30283 #else
30284 add $7,%_ASM_AX
30285@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30286 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30287 jae bad_get_user_8
30288 ASM_STAC
30289-4: movl -7(%_ASM_AX),%edx
30290-5: movl -3(%_ASM_AX),%ecx
30291+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30292+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30293 xor %eax,%eax
30294 ASM_CLAC
30295+ pax_force_retaddr
30296 ret
30297 #endif
30298 CFI_ENDPROC
30299@@ -113,6 +175,7 @@ bad_get_user:
30300 xor %edx,%edx
30301 mov $(-EFAULT),%_ASM_AX
30302 ASM_CLAC
30303+ pax_force_retaddr
30304 ret
30305 CFI_ENDPROC
30306 END(bad_get_user)
30307@@ -124,6 +187,7 @@ bad_get_user_8:
30308 xor %ecx,%ecx
30309 mov $(-EFAULT),%_ASM_AX
30310 ASM_CLAC
30311+ pax_force_retaddr
30312 ret
30313 CFI_ENDPROC
30314 END(bad_get_user_8)
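
The __get_user_N hunks above combine three PaX changes: on i386 UDEREF the addr_limit check is dropped in favour of a %gs segment override (the __copyuser_seg macro defined at the top of the file), on amd64 UDEREF the incoming pointer is rebased into the userland shadow mapping, and every exit gains a pax_force_retaddr. A minimal user-space sketch of the amd64 rebase; the shadow base value is purely illustrative:

    #include <stdio.h>

    static unsigned long pax_user_shadow_base = 0x10000000000UL; /* hypothetical */

    /* mirrors: mov base,%_ASM_DX; cmp %_ASM_DX,%_ASM_AX; jae 1234f; add %_ASM_DX,%_ASM_AX */
    static unsigned long uderef_rebase(unsigned long addr)
    {
        if (addr < pax_user_shadow_base)
            addr += pax_user_shadow_base;
        return addr;
    }

    int main(void)
    {
        printf("%#lx\n", uderef_rebase(0x7f0000001000UL));
        return 0;
    }

Pointers below the shadow base are genuine userland addresses and get shifted into the shadow area, where UDEREF controls the access; anything already above it is left alone.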
30315diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30316index 85994f5..9929d7f 100644
30317--- a/arch/x86/lib/insn.c
30318+++ b/arch/x86/lib/insn.c
30319@@ -20,8 +20,10 @@
30320
30321 #ifdef __KERNEL__
30322 #include <linux/string.h>
30323+#include <asm/pgtable_types.h>
30324 #else
30325 #include <string.h>
30326+#define ktla_ktva(addr) addr
30327 #endif
30328 #include <asm/inat.h>
30329 #include <asm/insn.h>
30330@@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30331 buf_len = MAX_INSN_SIZE;
30332
30333 memset(insn, 0, sizeof(*insn));
30334- insn->kaddr = kaddr;
30335- insn->end_kaddr = kaddr + buf_len;
30336- insn->next_byte = kaddr;
30337+ insn->kaddr = ktla_ktva(kaddr);
30338+ insn->end_kaddr = insn->kaddr + buf_len;
30339+ insn->next_byte = insn->kaddr;
30340 insn->x86_64 = x86_64 ? 1 : 0;
30341 insn->opnd_bytes = 4;
30342 if (x86_64)
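
The insn.c change routes the decode buffer through ktla_ktva() so that all three window pointers (kaddr, end_kaddr, next_byte) are derived from the same translated address; under i386 KERNEXEC that macro (pulled in via asm/pgtable_types.h) maps a kernel-text linear address to its actual virtual alias, while the #else branch keeps userland builds of the decoder unchanged. A sketch of the reworked initialisation, using the identity fallback:

    #include <stdio.h>
    #include <string.h>

    #define MAX_INSN_SIZE 15
    #define ktla_ktva(addr) (addr)    /* identity, as in the #else branch above */

    struct insn_window {
        const unsigned char *kaddr, *end_kaddr, *next_byte;
    };

    /* mirrors insn_init(): end_kaddr/next_byte follow the translated kaddr */
    static void insn_window_init(struct insn_window *w, const void *kaddr, int buf_len)
    {
        if (buf_len > MAX_INSN_SIZE)
            buf_len = MAX_INSN_SIZE;
        memset(w, 0, sizeof(*w));
        w->kaddr = ktla_ktva((const unsigned char *)kaddr);
        w->end_kaddr = w->kaddr + buf_len;
        w->next_byte = w->kaddr;
    }

    int main(void)
    {
        unsigned char buf[4] = { 0x90 };  /* nop */
        struct insn_window w;
        insn_window_init(&w, buf, sizeof(buf));
        printf("window: %p..%p\n", (void *)w.kaddr, (void *)w.end_kaddr);
        return 0;
    }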
30343diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30344index 05a95e7..326f2fa 100644
30345--- a/arch/x86/lib/iomap_copy_64.S
30346+++ b/arch/x86/lib/iomap_copy_64.S
30347@@ -17,6 +17,7 @@
30348
30349 #include <linux/linkage.h>
30350 #include <asm/dwarf2.h>
30351+#include <asm/alternative-asm.h>
30352
30353 /*
30354 * override generic version in lib/iomap_copy.c
30355@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30356 CFI_STARTPROC
30357 movl %edx,%ecx
30358 rep movsd
30359+ pax_force_retaddr
30360 ret
30361 CFI_ENDPROC
30362 ENDPROC(__iowrite32_copy)
30363diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30364index 89b53c9..97357ca 100644
30365--- a/arch/x86/lib/memcpy_64.S
30366+++ b/arch/x86/lib/memcpy_64.S
30367@@ -24,7 +24,7 @@
30368 * This gets patched over the unrolled variant (below) via the
30369 * alternative instructions framework:
30370 */
30371- .section .altinstr_replacement, "ax", @progbits
30372+ .section .altinstr_replacement, "a", @progbits
30373 .Lmemcpy_c:
30374 movq %rdi, %rax
30375 movq %rdx, %rcx
30376@@ -33,6 +33,7 @@
30377 rep movsq
30378 movl %edx, %ecx
30379 rep movsb
30380+ pax_force_retaddr
30381 ret
30382 .Lmemcpy_e:
30383 .previous
30384@@ -44,11 +45,12 @@
30385 * This gets patched over the unrolled variant (below) via the
30386 * alternative instructions framework:
30387 */
30388- .section .altinstr_replacement, "ax", @progbits
30389+ .section .altinstr_replacement, "a", @progbits
30390 .Lmemcpy_c_e:
30391 movq %rdi, %rax
30392 movq %rdx, %rcx
30393 rep movsb
30394+ pax_force_retaddr
30395 ret
30396 .Lmemcpy_e_e:
30397 .previous
30398@@ -138,6 +140,7 @@ ENTRY(memcpy)
30399 movq %r9, 1*8(%rdi)
30400 movq %r10, -2*8(%rdi, %rdx)
30401 movq %r11, -1*8(%rdi, %rdx)
30402+ pax_force_retaddr
30403 retq
30404 .p2align 4
30405 .Lless_16bytes:
30406@@ -150,6 +153,7 @@ ENTRY(memcpy)
30407 movq -1*8(%rsi, %rdx), %r9
30408 movq %r8, 0*8(%rdi)
30409 movq %r9, -1*8(%rdi, %rdx)
30410+ pax_force_retaddr
30411 retq
30412 .p2align 4
30413 .Lless_8bytes:
30414@@ -163,6 +167,7 @@ ENTRY(memcpy)
30415 movl -4(%rsi, %rdx), %r8d
30416 movl %ecx, (%rdi)
30417 movl %r8d, -4(%rdi, %rdx)
30418+ pax_force_retaddr
30419 retq
30420 .p2align 4
30421 .Lless_3bytes:
30422@@ -181,6 +186,7 @@ ENTRY(memcpy)
30423 movb %cl, (%rdi)
30424
30425 .Lend:
30426+ pax_force_retaddr
30427 retq
30428 CFI_ENDPROC
30429 ENDPROC(memcpy)
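
Two recurring edits run through memcpy_64.S and the neighbouring string routines. The .altinstr_replacement section drops its executable flag ("ax" becomes "a"): its bytes are only copied over the patch site by the alternatives machinery, never executed in place, so under KERNEXEC the section need not be mapped executable. And pax_force_retaddr lands before every ret; under the KERNEXEC gcc plugin it re-asserts the top bit of the saved return address so a corrupted return cannot reach userland. A sketch of that bit trick, assuming the bts-style variant (btsq $63,(%rsp)); the actual expansion depends on the configured plugin method:

    #include <stdio.h>

    /* assumption: models only the "bts" flavour of pax_force_retaddr */
    static unsigned long force_retaddr(unsigned long ret)
    {
        return ret | (1UL << 63);
    }

    int main(void)
    {
        unsigned long hijacked = 0x00007f1234560000UL;  /* userland target */
        /* with bit 63 forced on, the address is non-canonical, so a
         * ret-to-user attempt faults instead of executing attacker code;
         * legitimate kernel return addresses already have the bit set */
        printf("%#lx -> %#lx\n", hijacked, force_retaddr(hijacked));
        return 0;
    }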
30430diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30431index 9c4b530..830b77a 100644
30432--- a/arch/x86/lib/memmove_64.S
30433+++ b/arch/x86/lib/memmove_64.S
30434@@ -205,14 +205,16 @@ ENTRY(__memmove)
30435 movb (%rsi), %r11b
30436 movb %r11b, (%rdi)
30437 13:
30438+ pax_force_retaddr
30439 retq
30440 CFI_ENDPROC
30441
30442- .section .altinstr_replacement,"ax"
30443+ .section .altinstr_replacement,"a"
30444 .Lmemmove_begin_forward_efs:
30445 /* Forward moving data. */
30446 movq %rdx, %rcx
30447 rep movsb
30448+ pax_force_retaddr
30449 retq
30450 .Lmemmove_end_forward_efs:
30451 .previous
30452diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30453index 6f44935..fbf5f6d 100644
30454--- a/arch/x86/lib/memset_64.S
30455+++ b/arch/x86/lib/memset_64.S
30456@@ -16,7 +16,7 @@
30457 *
30458 * rax original destination
30459 */
30460- .section .altinstr_replacement, "ax", @progbits
30461+ .section .altinstr_replacement, "a", @progbits
30462 .Lmemset_c:
30463 movq %rdi,%r9
30464 movq %rdx,%rcx
30465@@ -30,6 +30,7 @@
30466 movl %edx,%ecx
30467 rep stosb
30468 movq %r9,%rax
30469+ pax_force_retaddr
30470 ret
30471 .Lmemset_e:
30472 .previous
30473@@ -45,13 +46,14 @@
30474 *
30475 * rax original destination
30476 */
30477- .section .altinstr_replacement, "ax", @progbits
30478+ .section .altinstr_replacement, "a", @progbits
30479 .Lmemset_c_e:
30480 movq %rdi,%r9
30481 movb %sil,%al
30482 movq %rdx,%rcx
30483 rep stosb
30484 movq %r9,%rax
30485+ pax_force_retaddr
30486 ret
30487 .Lmemset_e_e:
30488 .previous
30489@@ -120,6 +122,7 @@ ENTRY(__memset)
30490
30491 .Lende:
30492 movq %r10,%rax
30493+ pax_force_retaddr
30494 ret
30495
30496 CFI_RESTORE_STATE
30497diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30498index c9f2d9b..e7fd2c0 100644
30499--- a/arch/x86/lib/mmx_32.c
30500+++ b/arch/x86/lib/mmx_32.c
30501@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30502 {
30503 void *p;
30504 int i;
30505+ unsigned long cr0;
30506
30507 if (unlikely(in_interrupt()))
30508 return __memcpy(to, from, len);
30509@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30510 kernel_fpu_begin();
30511
30512 __asm__ __volatile__ (
30513- "1: prefetch (%0)\n" /* This set is 28 bytes */
30514- " prefetch 64(%0)\n"
30515- " prefetch 128(%0)\n"
30516- " prefetch 192(%0)\n"
30517- " prefetch 256(%0)\n"
30518+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30519+ " prefetch 64(%1)\n"
30520+ " prefetch 128(%1)\n"
30521+ " prefetch 192(%1)\n"
30522+ " prefetch 256(%1)\n"
30523 "2: \n"
30524 ".section .fixup, \"ax\"\n"
30525- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30526+ "3: \n"
30527+
30528+#ifdef CONFIG_PAX_KERNEXEC
30529+ " movl %%cr0, %0\n"
30530+ " movl %0, %%eax\n"
30531+ " andl $0xFFFEFFFF, %%eax\n"
30532+ " movl %%eax, %%cr0\n"
30533+#endif
30534+
30535+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30536+
30537+#ifdef CONFIG_PAX_KERNEXEC
30538+ " movl %0, %%cr0\n"
30539+#endif
30540+
30541 " jmp 2b\n"
30542 ".previous\n"
30543 _ASM_EXTABLE(1b, 3b)
30544- : : "r" (from));
30545+ : "=&r" (cr0) : "r" (from) : "ax");
30546
30547 for ( ; i > 5; i--) {
30548 __asm__ __volatile__ (
30549- "1: prefetch 320(%0)\n"
30550- "2: movq (%0), %%mm0\n"
30551- " movq 8(%0), %%mm1\n"
30552- " movq 16(%0), %%mm2\n"
30553- " movq 24(%0), %%mm3\n"
30554- " movq %%mm0, (%1)\n"
30555- " movq %%mm1, 8(%1)\n"
30556- " movq %%mm2, 16(%1)\n"
30557- " movq %%mm3, 24(%1)\n"
30558- " movq 32(%0), %%mm0\n"
30559- " movq 40(%0), %%mm1\n"
30560- " movq 48(%0), %%mm2\n"
30561- " movq 56(%0), %%mm3\n"
30562- " movq %%mm0, 32(%1)\n"
30563- " movq %%mm1, 40(%1)\n"
30564- " movq %%mm2, 48(%1)\n"
30565- " movq %%mm3, 56(%1)\n"
30566+ "1: prefetch 320(%1)\n"
30567+ "2: movq (%1), %%mm0\n"
30568+ " movq 8(%1), %%mm1\n"
30569+ " movq 16(%1), %%mm2\n"
30570+ " movq 24(%1), %%mm3\n"
30571+ " movq %%mm0, (%2)\n"
30572+ " movq %%mm1, 8(%2)\n"
30573+ " movq %%mm2, 16(%2)\n"
30574+ " movq %%mm3, 24(%2)\n"
30575+ " movq 32(%1), %%mm0\n"
30576+ " movq 40(%1), %%mm1\n"
30577+ " movq 48(%1), %%mm2\n"
30578+ " movq 56(%1), %%mm3\n"
30579+ " movq %%mm0, 32(%2)\n"
30580+ " movq %%mm1, 40(%2)\n"
30581+ " movq %%mm2, 48(%2)\n"
30582+ " movq %%mm3, 56(%2)\n"
30583 ".section .fixup, \"ax\"\n"
30584- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30585+ "3:\n"
30586+
30587+#ifdef CONFIG_PAX_KERNEXEC
30588+ " movl %%cr0, %0\n"
30589+ " movl %0, %%eax\n"
30590+ " andl $0xFFFEFFFF, %%eax\n"
30591+ " movl %%eax, %%cr0\n"
30592+#endif
30593+
30594+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30595+
30596+#ifdef CONFIG_PAX_KERNEXEC
30597+ " movl %0, %%cr0\n"
30598+#endif
30599+
30600 " jmp 2b\n"
30601 ".previous\n"
30602 _ASM_EXTABLE(1b, 3b)
30603- : : "r" (from), "r" (to) : "memory");
30604+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30605
30606 from += 64;
30607 to += 64;
30608@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30609 static void fast_copy_page(void *to, void *from)
30610 {
30611 int i;
30612+ unsigned long cr0;
30613
30614 kernel_fpu_begin();
30615
30616@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30617 * but that is for later. -AV
30618 */
30619 __asm__ __volatile__(
30620- "1: prefetch (%0)\n"
30621- " prefetch 64(%0)\n"
30622- " prefetch 128(%0)\n"
30623- " prefetch 192(%0)\n"
30624- " prefetch 256(%0)\n"
30625+ "1: prefetch (%1)\n"
30626+ " prefetch 64(%1)\n"
30627+ " prefetch 128(%1)\n"
30628+ " prefetch 192(%1)\n"
30629+ " prefetch 256(%1)\n"
30630 "2: \n"
30631 ".section .fixup, \"ax\"\n"
30632- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30633+ "3: \n"
30634+
30635+#ifdef CONFIG_PAX_KERNEXEC
30636+ " movl %%cr0, %0\n"
30637+ " movl %0, %%eax\n"
30638+ " andl $0xFFFEFFFF, %%eax\n"
30639+ " movl %%eax, %%cr0\n"
30640+#endif
30641+
30642+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30643+
30644+#ifdef CONFIG_PAX_KERNEXEC
30645+ " movl %0, %%cr0\n"
30646+#endif
30647+
30648 " jmp 2b\n"
30649 ".previous\n"
30650- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30651+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30652
30653 for (i = 0; i < (4096-320)/64; i++) {
30654 __asm__ __volatile__ (
30655- "1: prefetch 320(%0)\n"
30656- "2: movq (%0), %%mm0\n"
30657- " movntq %%mm0, (%1)\n"
30658- " movq 8(%0), %%mm1\n"
30659- " movntq %%mm1, 8(%1)\n"
30660- " movq 16(%0), %%mm2\n"
30661- " movntq %%mm2, 16(%1)\n"
30662- " movq 24(%0), %%mm3\n"
30663- " movntq %%mm3, 24(%1)\n"
30664- " movq 32(%0), %%mm4\n"
30665- " movntq %%mm4, 32(%1)\n"
30666- " movq 40(%0), %%mm5\n"
30667- " movntq %%mm5, 40(%1)\n"
30668- " movq 48(%0), %%mm6\n"
30669- " movntq %%mm6, 48(%1)\n"
30670- " movq 56(%0), %%mm7\n"
30671- " movntq %%mm7, 56(%1)\n"
30672+ "1: prefetch 320(%1)\n"
30673+ "2: movq (%1), %%mm0\n"
30674+ " movntq %%mm0, (%2)\n"
30675+ " movq 8(%1), %%mm1\n"
30676+ " movntq %%mm1, 8(%2)\n"
30677+ " movq 16(%1), %%mm2\n"
30678+ " movntq %%mm2, 16(%2)\n"
30679+ " movq 24(%1), %%mm3\n"
30680+ " movntq %%mm3, 24(%2)\n"
30681+ " movq 32(%1), %%mm4\n"
30682+ " movntq %%mm4, 32(%2)\n"
30683+ " movq 40(%1), %%mm5\n"
30684+ " movntq %%mm5, 40(%2)\n"
30685+ " movq 48(%1), %%mm6\n"
30686+ " movntq %%mm6, 48(%2)\n"
30687+ " movq 56(%1), %%mm7\n"
30688+ " movntq %%mm7, 56(%2)\n"
30689 ".section .fixup, \"ax\"\n"
30690- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30691+ "3:\n"
30692+
30693+#ifdef CONFIG_PAX_KERNEXEC
30694+ " movl %%cr0, %0\n"
30695+ " movl %0, %%eax\n"
30696+ " andl $0xFFFEFFFF, %%eax\n"
30697+ " movl %%eax, %%cr0\n"
30698+#endif
30699+
30700+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30701+
30702+#ifdef CONFIG_PAX_KERNEXEC
30703+ " movl %0, %%cr0\n"
30704+#endif
30705+
30706 " jmp 2b\n"
30707 ".previous\n"
30708- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30709+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30710
30711 from += 64;
30712 to += 64;
30713@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30714 static void fast_copy_page(void *to, void *from)
30715 {
30716 int i;
30717+ unsigned long cr0;
30718
30719 kernel_fpu_begin();
30720
30721 __asm__ __volatile__ (
30722- "1: prefetch (%0)\n"
30723- " prefetch 64(%0)\n"
30724- " prefetch 128(%0)\n"
30725- " prefetch 192(%0)\n"
30726- " prefetch 256(%0)\n"
30727+ "1: prefetch (%1)\n"
30728+ " prefetch 64(%1)\n"
30729+ " prefetch 128(%1)\n"
30730+ " prefetch 192(%1)\n"
30731+ " prefetch 256(%1)\n"
30732 "2: \n"
30733 ".section .fixup, \"ax\"\n"
30734- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30735+ "3: \n"
30736+
30737+#ifdef CONFIG_PAX_KERNEXEC
30738+ " movl %%cr0, %0\n"
30739+ " movl %0, %%eax\n"
30740+ " andl $0xFFFEFFFF, %%eax\n"
30741+ " movl %%eax, %%cr0\n"
30742+#endif
30743+
30744+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30745+
30746+#ifdef CONFIG_PAX_KERNEXEC
30747+ " movl %0, %%cr0\n"
30748+#endif
30749+
30750 " jmp 2b\n"
30751 ".previous\n"
30752- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30753+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30754
30755 for (i = 0; i < 4096/64; i++) {
30756 __asm__ __volatile__ (
30757- "1: prefetch 320(%0)\n"
30758- "2: movq (%0), %%mm0\n"
30759- " movq 8(%0), %%mm1\n"
30760- " movq 16(%0), %%mm2\n"
30761- " movq 24(%0), %%mm3\n"
30762- " movq %%mm0, (%1)\n"
30763- " movq %%mm1, 8(%1)\n"
30764- " movq %%mm2, 16(%1)\n"
30765- " movq %%mm3, 24(%1)\n"
30766- " movq 32(%0), %%mm0\n"
30767- " movq 40(%0), %%mm1\n"
30768- " movq 48(%0), %%mm2\n"
30769- " movq 56(%0), %%mm3\n"
30770- " movq %%mm0, 32(%1)\n"
30771- " movq %%mm1, 40(%1)\n"
30772- " movq %%mm2, 48(%1)\n"
30773- " movq %%mm3, 56(%1)\n"
30774+ "1: prefetch 320(%1)\n"
30775+ "2: movq (%1), %%mm0\n"
30776+ " movq 8(%1), %%mm1\n"
30777+ " movq 16(%1), %%mm2\n"
30778+ " movq 24(%1), %%mm3\n"
30779+ " movq %%mm0, (%2)\n"
30780+ " movq %%mm1, 8(%2)\n"
30781+ " movq %%mm2, 16(%2)\n"
30782+ " movq %%mm3, 24(%2)\n"
30783+ " movq 32(%1), %%mm0\n"
30784+ " movq 40(%1), %%mm1\n"
30785+ " movq 48(%1), %%mm2\n"
30786+ " movq 56(%1), %%mm3\n"
30787+ " movq %%mm0, 32(%2)\n"
30788+ " movq %%mm1, 40(%2)\n"
30789+ " movq %%mm2, 48(%2)\n"
30790+ " movq %%mm3, 56(%2)\n"
30791 ".section .fixup, \"ax\"\n"
30792- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30793+ "3:\n"
30794+
30795+#ifdef CONFIG_PAX_KERNEXEC
30796+ " movl %%cr0, %0\n"
30797+ " movl %0, %%eax\n"
30798+ " andl $0xFFFEFFFF, %%eax\n"
30799+ " movl %%eax, %%cr0\n"
30800+#endif
30801+
30802+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30803+
30804+#ifdef CONFIG_PAX_KERNEXEC
30805+ " movl %0, %%cr0\n"
30806+#endif
30807+
30808 " jmp 2b\n"
30809 ".previous\n"
30810 _ASM_EXTABLE(1b, 3b)
30811- : : "r" (from), "r" (to) : "memory");
30812+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30813
30814 from += 64;
30815 to += 64;
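
The mmx_32.c fixup paths are self-modifying: when a prefetch faults, the exception handler overwrites the instruction at label 1 with a short jmp (movw $0x1AEB or $0x05EB, i.e. the bytes EB 1A / EB 05). Under KERNEXEC kernel text is write-protected even in ring 0, so the patch brackets that store with a CR0.WP toggle — andl $0xFFFEFFFF clears bit 16 — which is why each asm gains a cr0 output operand and an "ax" clobber. A self-contained model, with CR0 as a plain variable since the real moves are privileged:

    #include <stdio.h>

    #define CR0_WP (1UL << 16)  /* ring-0 write-protect enforcement */

    static unsigned long cr0 = 0x80050033UL;  /* typical CR0 value, WP set */
    static unsigned char patch_site[2];

    int main(void)
    {
        unsigned long saved = cr0;
        cr0 &= 0xFFFEFFFFUL;        /* clear WP: kernel text writable */
        patch_site[0] = 0xEB;       /* movw $0x1AEB stores EB 1A:     */
        patch_site[1] = 0x1A;       /* a "jmp +26" over the prefetch  */
        cr0 = saved;                /* restore WP */
        printf("WP restored: %s\n", (cr0 & CR0_WP) ? "yes" : "no");
        return 0;
    }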
30816diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30817index f6d13ee..d789440 100644
30818--- a/arch/x86/lib/msr-reg.S
30819+++ b/arch/x86/lib/msr-reg.S
30820@@ -3,6 +3,7 @@
30821 #include <asm/dwarf2.h>
30822 #include <asm/asm.h>
30823 #include <asm/msr.h>
30824+#include <asm/alternative-asm.h>
30825
30826 #ifdef CONFIG_X86_64
30827 /*
30828@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30829 movl %edi, 28(%r10)
30830 popq_cfi %rbp
30831 popq_cfi %rbx
30832+ pax_force_retaddr
30833 ret
30834 3:
30835 CFI_RESTORE_STATE
30836diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30837index fc6ba17..14ad9a5 100644
30838--- a/arch/x86/lib/putuser.S
30839+++ b/arch/x86/lib/putuser.S
30840@@ -16,7 +16,9 @@
30841 #include <asm/errno.h>
30842 #include <asm/asm.h>
30843 #include <asm/smap.h>
30844-
30845+#include <asm/segment.h>
30846+#include <asm/pgtable.h>
30847+#include <asm/alternative-asm.h>
30848
30849 /*
30850 * __put_user_X
30851@@ -30,57 +32,125 @@
30852 * as they get called from within inline assembly.
30853 */
30854
30855-#define ENTER CFI_STARTPROC ; \
30856- GET_THREAD_INFO(%_ASM_BX)
30857-#define EXIT ASM_CLAC ; \
30858- ret ; \
30859+#define ENTER CFI_STARTPROC
30860+#define EXIT ASM_CLAC ; \
30861+ pax_force_retaddr ; \
30862+ ret ; \
30863 CFI_ENDPROC
30864
30865+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30866+#define _DEST %_ASM_CX,%_ASM_BX
30867+#else
30868+#define _DEST %_ASM_CX
30869+#endif
30870+
30871+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30872+#define __copyuser_seg gs;
30873+#else
30874+#define __copyuser_seg
30875+#endif
30876+
30877 .text
30878 ENTRY(__put_user_1)
30879 ENTER
30880+
30881+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30882+ GET_THREAD_INFO(%_ASM_BX)
30883 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30884 jae bad_put_user
30885+
30886+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30887+ mov pax_user_shadow_base,%_ASM_BX
30888+ cmp %_ASM_BX,%_ASM_CX
30889+ jb 1234f
30890+ xor %ebx,%ebx
30891+1234:
30892+#endif
30893+
30894+#endif
30895+
30896 ASM_STAC
30897-1: movb %al,(%_ASM_CX)
30898+1: __copyuser_seg movb %al,(_DEST)
30899 xor %eax,%eax
30900 EXIT
30901 ENDPROC(__put_user_1)
30902
30903 ENTRY(__put_user_2)
30904 ENTER
30905+
30906+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30907+ GET_THREAD_INFO(%_ASM_BX)
30908 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30909 sub $1,%_ASM_BX
30910 cmp %_ASM_BX,%_ASM_CX
30911 jae bad_put_user
30912+
30913+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30914+ mov pax_user_shadow_base,%_ASM_BX
30915+ cmp %_ASM_BX,%_ASM_CX
30916+ jb 1234f
30917+ xor %ebx,%ebx
30918+1234:
30919+#endif
30920+
30921+#endif
30922+
30923 ASM_STAC
30924-2: movw %ax,(%_ASM_CX)
30925+2: __copyuser_seg movw %ax,(_DEST)
30926 xor %eax,%eax
30927 EXIT
30928 ENDPROC(__put_user_2)
30929
30930 ENTRY(__put_user_4)
30931 ENTER
30932+
30933+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30934+ GET_THREAD_INFO(%_ASM_BX)
30935 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30936 sub $3,%_ASM_BX
30937 cmp %_ASM_BX,%_ASM_CX
30938 jae bad_put_user
30939+
30940+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30941+ mov pax_user_shadow_base,%_ASM_BX
30942+ cmp %_ASM_BX,%_ASM_CX
30943+ jb 1234f
30944+ xor %ebx,%ebx
30945+1234:
30946+#endif
30947+
30948+#endif
30949+
30950 ASM_STAC
30951-3: movl %eax,(%_ASM_CX)
30952+3: __copyuser_seg movl %eax,(_DEST)
30953 xor %eax,%eax
30954 EXIT
30955 ENDPROC(__put_user_4)
30956
30957 ENTRY(__put_user_8)
30958 ENTER
30959+
30960+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30961+ GET_THREAD_INFO(%_ASM_BX)
30962 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30963 sub $7,%_ASM_BX
30964 cmp %_ASM_BX,%_ASM_CX
30965 jae bad_put_user
30966+
30967+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30968+ mov pax_user_shadow_base,%_ASM_BX
30969+ cmp %_ASM_BX,%_ASM_CX
30970+ jb 1234f
30971+ xor %ebx,%ebx
30972+1234:
30973+#endif
30974+
30975+#endif
30976+
30977 ASM_STAC
30978-4: mov %_ASM_AX,(%_ASM_CX)
30979+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30980 #ifdef CONFIG_X86_32
30981-5: movl %edx,4(%_ASM_CX)
30982+5: __copyuser_seg movl %edx,4(_DEST)
30983 #endif
30984 xor %eax,%eax
30985 EXIT
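
__put_user_N handles the amd64 UDEREF rebase differently from getuser.S: the shadow offset is kept in %_ASM_BX and the store targets (_DEST), i.e. the base+index form (%_ASM_CX,%_ASM_BX); when the pointer already sits at or above the shadow base the offset register is simply zeroed (jb 1234f; xor %ebx,%ebx). The equivalent arithmetic, shadow base again illustrative:

    #include <stdio.h>

    static unsigned long pax_user_shadow_base = 0x10000000000UL; /* hypothetical */

    /* mirrors: mov base,%_ASM_BX; cmp %_ASM_BX,%_ASM_CX; jb 1234f; xor %ebx,%ebx */
    static unsigned long put_user_dest(unsigned long addr)
    {
        unsigned long off = pax_user_shadow_base;
        if (addr >= off)
            off = 0;        /* already rebased: store in place */
        return addr + off;  /* effective address (%_ASM_CX,%_ASM_BX) */
    }

    int main(void)
    {
        printf("%#lx\n", put_user_dest(0x7f0000002000UL));
        return 0;
    }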
30986diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30987index 5dff5f0..cadebf4 100644
30988--- a/arch/x86/lib/rwsem.S
30989+++ b/arch/x86/lib/rwsem.S
30990@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30991 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30992 CFI_RESTORE __ASM_REG(dx)
30993 restore_common_regs
30994+ pax_force_retaddr
30995 ret
30996 CFI_ENDPROC
30997 ENDPROC(call_rwsem_down_read_failed)
30998@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30999 movq %rax,%rdi
31000 call rwsem_down_write_failed
31001 restore_common_regs
31002+ pax_force_retaddr
31003 ret
31004 CFI_ENDPROC
31005 ENDPROC(call_rwsem_down_write_failed)
31006@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31007 movq %rax,%rdi
31008 call rwsem_wake
31009 restore_common_regs
31010-1: ret
31011+1: pax_force_retaddr
31012+ ret
31013 CFI_ENDPROC
31014 ENDPROC(call_rwsem_wake)
31015
31016@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31017 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31018 CFI_RESTORE __ASM_REG(dx)
31019 restore_common_regs
31020+ pax_force_retaddr
31021 ret
31022 CFI_ENDPROC
31023 ENDPROC(call_rwsem_downgrade_wake)
31024diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31025index b30b5eb..2b57052 100644
31026--- a/arch/x86/lib/thunk_64.S
31027+++ b/arch/x86/lib/thunk_64.S
31028@@ -9,6 +9,7 @@
31029 #include <asm/dwarf2.h>
31030 #include <asm/calling.h>
31031 #include <asm/asm.h>
31032+#include <asm/alternative-asm.h>
31033
31034 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31035 .macro THUNK name, func, put_ret_addr_in_rdi=0
31036@@ -16,11 +17,11 @@
31037 \name:
31038 CFI_STARTPROC
31039
31040- /* this one pushes 9 elems, the next one would be %rIP */
31041- SAVE_ARGS
31042+ /* this one pushes 15+1 elems, the next one would be %rIP */
31043+ SAVE_ARGS 8
31044
31045 .if \put_ret_addr_in_rdi
31046- movq_cfi_restore 9*8, rdi
31047+ movq_cfi_restore RIP, rdi
31048 .endif
31049
31050 call \func
31051@@ -47,9 +48,10 @@
31052
31053 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31054 CFI_STARTPROC
31055- SAVE_ARGS
31056+ SAVE_ARGS 8
31057 restore:
31058- RESTORE_ARGS
31059+ RESTORE_ARGS 1,8
31060+ pax_force_retaddr
31061 ret
31062 CFI_ENDPROC
31063 _ASM_NOKPROBE(restore)
31064diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31065index e2f5e21..4b22130 100644
31066--- a/arch/x86/lib/usercopy_32.c
31067+++ b/arch/x86/lib/usercopy_32.c
31068@@ -42,11 +42,13 @@ do { \
31069 int __d0; \
31070 might_fault(); \
31071 __asm__ __volatile__( \
31072+ __COPYUSER_SET_ES \
31073 ASM_STAC "\n" \
31074 "0: rep; stosl\n" \
31075 " movl %2,%0\n" \
31076 "1: rep; stosb\n" \
31077 "2: " ASM_CLAC "\n" \
31078+ __COPYUSER_RESTORE_ES \
31079 ".section .fixup,\"ax\"\n" \
31080 "3: lea 0(%2,%0,4),%0\n" \
31081 " jmp 2b\n" \
31082@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31083
31084 #ifdef CONFIG_X86_INTEL_USERCOPY
31085 static unsigned long
31086-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31087+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31088 {
31089 int d0, d1;
31090 __asm__ __volatile__(
31091@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31092 " .align 2,0x90\n"
31093 "3: movl 0(%4), %%eax\n"
31094 "4: movl 4(%4), %%edx\n"
31095- "5: movl %%eax, 0(%3)\n"
31096- "6: movl %%edx, 4(%3)\n"
31097+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31098+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31099 "7: movl 8(%4), %%eax\n"
31100 "8: movl 12(%4),%%edx\n"
31101- "9: movl %%eax, 8(%3)\n"
31102- "10: movl %%edx, 12(%3)\n"
31103+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31104+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31105 "11: movl 16(%4), %%eax\n"
31106 "12: movl 20(%4), %%edx\n"
31107- "13: movl %%eax, 16(%3)\n"
31108- "14: movl %%edx, 20(%3)\n"
31109+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31110+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31111 "15: movl 24(%4), %%eax\n"
31112 "16: movl 28(%4), %%edx\n"
31113- "17: movl %%eax, 24(%3)\n"
31114- "18: movl %%edx, 28(%3)\n"
31115+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31116+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31117 "19: movl 32(%4), %%eax\n"
31118 "20: movl 36(%4), %%edx\n"
31119- "21: movl %%eax, 32(%3)\n"
31120- "22: movl %%edx, 36(%3)\n"
31121+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31122+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31123 "23: movl 40(%4), %%eax\n"
31124 "24: movl 44(%4), %%edx\n"
31125- "25: movl %%eax, 40(%3)\n"
31126- "26: movl %%edx, 44(%3)\n"
31127+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31128+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31129 "27: movl 48(%4), %%eax\n"
31130 "28: movl 52(%4), %%edx\n"
31131- "29: movl %%eax, 48(%3)\n"
31132- "30: movl %%edx, 52(%3)\n"
31133+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31134+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31135 "31: movl 56(%4), %%eax\n"
31136 "32: movl 60(%4), %%edx\n"
31137- "33: movl %%eax, 56(%3)\n"
31138- "34: movl %%edx, 60(%3)\n"
31139+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31140+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31141 " addl $-64, %0\n"
31142 " addl $64, %4\n"
31143 " addl $64, %3\n"
31144@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31145 " shrl $2, %0\n"
31146 " andl $3, %%eax\n"
31147 " cld\n"
31148+ __COPYUSER_SET_ES
31149 "99: rep; movsl\n"
31150 "36: movl %%eax, %0\n"
31151 "37: rep; movsb\n"
31152 "100:\n"
31153+ __COPYUSER_RESTORE_ES
31154+ ".section .fixup,\"ax\"\n"
31155+ "101: lea 0(%%eax,%0,4),%0\n"
31156+ " jmp 100b\n"
31157+ ".previous\n"
31158+ _ASM_EXTABLE(1b,100b)
31159+ _ASM_EXTABLE(2b,100b)
31160+ _ASM_EXTABLE(3b,100b)
31161+ _ASM_EXTABLE(4b,100b)
31162+ _ASM_EXTABLE(5b,100b)
31163+ _ASM_EXTABLE(6b,100b)
31164+ _ASM_EXTABLE(7b,100b)
31165+ _ASM_EXTABLE(8b,100b)
31166+ _ASM_EXTABLE(9b,100b)
31167+ _ASM_EXTABLE(10b,100b)
31168+ _ASM_EXTABLE(11b,100b)
31169+ _ASM_EXTABLE(12b,100b)
31170+ _ASM_EXTABLE(13b,100b)
31171+ _ASM_EXTABLE(14b,100b)
31172+ _ASM_EXTABLE(15b,100b)
31173+ _ASM_EXTABLE(16b,100b)
31174+ _ASM_EXTABLE(17b,100b)
31175+ _ASM_EXTABLE(18b,100b)
31176+ _ASM_EXTABLE(19b,100b)
31177+ _ASM_EXTABLE(20b,100b)
31178+ _ASM_EXTABLE(21b,100b)
31179+ _ASM_EXTABLE(22b,100b)
31180+ _ASM_EXTABLE(23b,100b)
31181+ _ASM_EXTABLE(24b,100b)
31182+ _ASM_EXTABLE(25b,100b)
31183+ _ASM_EXTABLE(26b,100b)
31184+ _ASM_EXTABLE(27b,100b)
31185+ _ASM_EXTABLE(28b,100b)
31186+ _ASM_EXTABLE(29b,100b)
31187+ _ASM_EXTABLE(30b,100b)
31188+ _ASM_EXTABLE(31b,100b)
31189+ _ASM_EXTABLE(32b,100b)
31190+ _ASM_EXTABLE(33b,100b)
31191+ _ASM_EXTABLE(34b,100b)
31192+ _ASM_EXTABLE(35b,100b)
31193+ _ASM_EXTABLE(36b,100b)
31194+ _ASM_EXTABLE(37b,100b)
31195+ _ASM_EXTABLE(99b,101b)
31196+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31197+ : "1"(to), "2"(from), "0"(size)
31198+ : "eax", "edx", "memory");
31199+ return size;
31200+}
31201+
31202+static unsigned long
31203+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31204+{
31205+ int d0, d1;
31206+ __asm__ __volatile__(
31207+ " .align 2,0x90\n"
31208+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31209+ " cmpl $67, %0\n"
31210+ " jbe 3f\n"
31211+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31212+ " .align 2,0x90\n"
31213+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31214+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31215+ "5: movl %%eax, 0(%3)\n"
31216+ "6: movl %%edx, 4(%3)\n"
31217+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31218+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31219+ "9: movl %%eax, 8(%3)\n"
31220+ "10: movl %%edx, 12(%3)\n"
31221+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31222+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31223+ "13: movl %%eax, 16(%3)\n"
31224+ "14: movl %%edx, 20(%3)\n"
31225+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31226+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31227+ "17: movl %%eax, 24(%3)\n"
31228+ "18: movl %%edx, 28(%3)\n"
31229+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31230+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31231+ "21: movl %%eax, 32(%3)\n"
31232+ "22: movl %%edx, 36(%3)\n"
31233+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31234+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31235+ "25: movl %%eax, 40(%3)\n"
31236+ "26: movl %%edx, 44(%3)\n"
31237+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31238+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31239+ "29: movl %%eax, 48(%3)\n"
31240+ "30: movl %%edx, 52(%3)\n"
31241+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31242+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31243+ "33: movl %%eax, 56(%3)\n"
31244+ "34: movl %%edx, 60(%3)\n"
31245+ " addl $-64, %0\n"
31246+ " addl $64, %4\n"
31247+ " addl $64, %3\n"
31248+ " cmpl $63, %0\n"
31249+ " ja 1b\n"
31250+ "35: movl %0, %%eax\n"
31251+ " shrl $2, %0\n"
31252+ " andl $3, %%eax\n"
31253+ " cld\n"
31254+ "99: rep; "__copyuser_seg" movsl\n"
31255+ "36: movl %%eax, %0\n"
31256+ "37: rep; "__copyuser_seg" movsb\n"
31257+ "100:\n"
31258 ".section .fixup,\"ax\"\n"
31259 "101: lea 0(%%eax,%0,4),%0\n"
31260 " jmp 100b\n"
31261@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31262 int d0, d1;
31263 __asm__ __volatile__(
31264 " .align 2,0x90\n"
31265- "0: movl 32(%4), %%eax\n"
31266+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31267 " cmpl $67, %0\n"
31268 " jbe 2f\n"
31269- "1: movl 64(%4), %%eax\n"
31270+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31271 " .align 2,0x90\n"
31272- "2: movl 0(%4), %%eax\n"
31273- "21: movl 4(%4), %%edx\n"
31274+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31275+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31276 " movl %%eax, 0(%3)\n"
31277 " movl %%edx, 4(%3)\n"
31278- "3: movl 8(%4), %%eax\n"
31279- "31: movl 12(%4),%%edx\n"
31280+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31281+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31282 " movl %%eax, 8(%3)\n"
31283 " movl %%edx, 12(%3)\n"
31284- "4: movl 16(%4), %%eax\n"
31285- "41: movl 20(%4), %%edx\n"
31286+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31287+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31288 " movl %%eax, 16(%3)\n"
31289 " movl %%edx, 20(%3)\n"
31290- "10: movl 24(%4), %%eax\n"
31291- "51: movl 28(%4), %%edx\n"
31292+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31293+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31294 " movl %%eax, 24(%3)\n"
31295 " movl %%edx, 28(%3)\n"
31296- "11: movl 32(%4), %%eax\n"
31297- "61: movl 36(%4), %%edx\n"
31298+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31299+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31300 " movl %%eax, 32(%3)\n"
31301 " movl %%edx, 36(%3)\n"
31302- "12: movl 40(%4), %%eax\n"
31303- "71: movl 44(%4), %%edx\n"
31304+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31305+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31306 " movl %%eax, 40(%3)\n"
31307 " movl %%edx, 44(%3)\n"
31308- "13: movl 48(%4), %%eax\n"
31309- "81: movl 52(%4), %%edx\n"
31310+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31311+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31312 " movl %%eax, 48(%3)\n"
31313 " movl %%edx, 52(%3)\n"
31314- "14: movl 56(%4), %%eax\n"
31315- "91: movl 60(%4), %%edx\n"
31316+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31317+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31318 " movl %%eax, 56(%3)\n"
31319 " movl %%edx, 60(%3)\n"
31320 " addl $-64, %0\n"
31321@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31322 " shrl $2, %0\n"
31323 " andl $3, %%eax\n"
31324 " cld\n"
31325- "6: rep; movsl\n"
31326+ "6: rep; "__copyuser_seg" movsl\n"
31327 " movl %%eax,%0\n"
31328- "7: rep; movsb\n"
31329+ "7: rep; "__copyuser_seg" movsb\n"
31330 "8:\n"
31331 ".section .fixup,\"ax\"\n"
31332 "9: lea 0(%%eax,%0,4),%0\n"
31333@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31334
31335 __asm__ __volatile__(
31336 " .align 2,0x90\n"
31337- "0: movl 32(%4), %%eax\n"
31338+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31339 " cmpl $67, %0\n"
31340 " jbe 2f\n"
31341- "1: movl 64(%4), %%eax\n"
31342+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31343 " .align 2,0x90\n"
31344- "2: movl 0(%4), %%eax\n"
31345- "21: movl 4(%4), %%edx\n"
31346+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31347+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31348 " movnti %%eax, 0(%3)\n"
31349 " movnti %%edx, 4(%3)\n"
31350- "3: movl 8(%4), %%eax\n"
31351- "31: movl 12(%4),%%edx\n"
31352+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31353+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31354 " movnti %%eax, 8(%3)\n"
31355 " movnti %%edx, 12(%3)\n"
31356- "4: movl 16(%4), %%eax\n"
31357- "41: movl 20(%4), %%edx\n"
31358+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31359+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31360 " movnti %%eax, 16(%3)\n"
31361 " movnti %%edx, 20(%3)\n"
31362- "10: movl 24(%4), %%eax\n"
31363- "51: movl 28(%4), %%edx\n"
31364+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31365+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31366 " movnti %%eax, 24(%3)\n"
31367 " movnti %%edx, 28(%3)\n"
31368- "11: movl 32(%4), %%eax\n"
31369- "61: movl 36(%4), %%edx\n"
31370+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31371+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31372 " movnti %%eax, 32(%3)\n"
31373 " movnti %%edx, 36(%3)\n"
31374- "12: movl 40(%4), %%eax\n"
31375- "71: movl 44(%4), %%edx\n"
31376+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31377+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31378 " movnti %%eax, 40(%3)\n"
31379 " movnti %%edx, 44(%3)\n"
31380- "13: movl 48(%4), %%eax\n"
31381- "81: movl 52(%4), %%edx\n"
31382+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31383+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31384 " movnti %%eax, 48(%3)\n"
31385 " movnti %%edx, 52(%3)\n"
31386- "14: movl 56(%4), %%eax\n"
31387- "91: movl 60(%4), %%edx\n"
31388+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31389+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31390 " movnti %%eax, 56(%3)\n"
31391 " movnti %%edx, 60(%3)\n"
31392 " addl $-64, %0\n"
31393@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31394 " shrl $2, %0\n"
31395 " andl $3, %%eax\n"
31396 " cld\n"
31397- "6: rep; movsl\n"
31398+ "6: rep; "__copyuser_seg" movsl\n"
31399 " movl %%eax,%0\n"
31400- "7: rep; movsb\n"
31401+ "7: rep; "__copyuser_seg" movsb\n"
31402 "8:\n"
31403 ".section .fixup,\"ax\"\n"
31404 "9: lea 0(%%eax,%0,4),%0\n"
31405@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31406
31407 __asm__ __volatile__(
31408 " .align 2,0x90\n"
31409- "0: movl 32(%4), %%eax\n"
31410+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31411 " cmpl $67, %0\n"
31412 " jbe 2f\n"
31413- "1: movl 64(%4), %%eax\n"
31414+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31415 " .align 2,0x90\n"
31416- "2: movl 0(%4), %%eax\n"
31417- "21: movl 4(%4), %%edx\n"
31418+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31419+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31420 " movnti %%eax, 0(%3)\n"
31421 " movnti %%edx, 4(%3)\n"
31422- "3: movl 8(%4), %%eax\n"
31423- "31: movl 12(%4),%%edx\n"
31424+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31425+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31426 " movnti %%eax, 8(%3)\n"
31427 " movnti %%edx, 12(%3)\n"
31428- "4: movl 16(%4), %%eax\n"
31429- "41: movl 20(%4), %%edx\n"
31430+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31431+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31432 " movnti %%eax, 16(%3)\n"
31433 " movnti %%edx, 20(%3)\n"
31434- "10: movl 24(%4), %%eax\n"
31435- "51: movl 28(%4), %%edx\n"
31436+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31437+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31438 " movnti %%eax, 24(%3)\n"
31439 " movnti %%edx, 28(%3)\n"
31440- "11: movl 32(%4), %%eax\n"
31441- "61: movl 36(%4), %%edx\n"
31442+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31443+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31444 " movnti %%eax, 32(%3)\n"
31445 " movnti %%edx, 36(%3)\n"
31446- "12: movl 40(%4), %%eax\n"
31447- "71: movl 44(%4), %%edx\n"
31448+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31449+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31450 " movnti %%eax, 40(%3)\n"
31451 " movnti %%edx, 44(%3)\n"
31452- "13: movl 48(%4), %%eax\n"
31453- "81: movl 52(%4), %%edx\n"
31454+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31455+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31456 " movnti %%eax, 48(%3)\n"
31457 " movnti %%edx, 52(%3)\n"
31458- "14: movl 56(%4), %%eax\n"
31459- "91: movl 60(%4), %%edx\n"
31460+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31461+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31462 " movnti %%eax, 56(%3)\n"
31463 " movnti %%edx, 60(%3)\n"
31464 " addl $-64, %0\n"
31465@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31466 " shrl $2, %0\n"
31467 " andl $3, %%eax\n"
31468 " cld\n"
31469- "6: rep; movsl\n"
31470+ "6: rep; "__copyuser_seg" movsl\n"
31471 " movl %%eax,%0\n"
31472- "7: rep; movsb\n"
31473+ "7: rep; "__copyuser_seg" movsb\n"
31474 "8:\n"
31475 ".section .fixup,\"ax\"\n"
31476 "9: lea 0(%%eax,%0,4),%0\n"
31477@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31478 */
31479 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31480 unsigned long size);
31481-unsigned long __copy_user_intel(void __user *to, const void *from,
31482+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31483+ unsigned long size);
31484+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31485 unsigned long size);
31486 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31487 const void __user *from, unsigned long size);
31488 #endif /* CONFIG_X86_INTEL_USERCOPY */
31489
31490 /* Generic arbitrary sized copy. */
31491-#define __copy_user(to, from, size) \
31492+#define __copy_user(to, from, size, prefix, set, restore) \
31493 do { \
31494 int __d0, __d1, __d2; \
31495 __asm__ __volatile__( \
31496+ set \
31497 " cmp $7,%0\n" \
31498 " jbe 1f\n" \
31499 " movl %1,%0\n" \
31500 " negl %0\n" \
31501 " andl $7,%0\n" \
31502 " subl %0,%3\n" \
31503- "4: rep; movsb\n" \
31504+ "4: rep; "prefix"movsb\n" \
31505 " movl %3,%0\n" \
31506 " shrl $2,%0\n" \
31507 " andl $3,%3\n" \
31508 " .align 2,0x90\n" \
31509- "0: rep; movsl\n" \
31510+ "0: rep; "prefix"movsl\n" \
31511 " movl %3,%0\n" \
31512- "1: rep; movsb\n" \
31513+ "1: rep; "prefix"movsb\n" \
31514 "2:\n" \
31515+ restore \
31516 ".section .fixup,\"ax\"\n" \
31517 "5: addl %3,%0\n" \
31518 " jmp 2b\n" \
31519@@ -538,14 +650,14 @@ do { \
31520 " negl %0\n" \
31521 " andl $7,%0\n" \
31522 " subl %0,%3\n" \
31523- "4: rep; movsb\n" \
31524+ "4: rep; "__copyuser_seg"movsb\n" \
31525 " movl %3,%0\n" \
31526 " shrl $2,%0\n" \
31527 " andl $3,%3\n" \
31528 " .align 2,0x90\n" \
31529- "0: rep; movsl\n" \
31530+ "0: rep; "__copyuser_seg"movsl\n" \
31531 " movl %3,%0\n" \
31532- "1: rep; movsb\n" \
31533+ "1: rep; "__copyuser_seg"movsb\n" \
31534 "2:\n" \
31535 ".section .fixup,\"ax\"\n" \
31536 "5: addl %3,%0\n" \
31537@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31538 {
31539 stac();
31540 if (movsl_is_ok(to, from, n))
31541- __copy_user(to, from, n);
31542+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31543 else
31544- n = __copy_user_intel(to, from, n);
31545+ n = __generic_copy_to_user_intel(to, from, n);
31546 clac();
31547 return n;
31548 }
31549@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31550 {
31551 stac();
31552 if (movsl_is_ok(to, from, n))
31553- __copy_user(to, from, n);
31554+ __copy_user(to, from, n, __copyuser_seg, "", "");
31555 else
31556- n = __copy_user_intel((void __user *)to,
31557- (const void *)from, n);
31558+ n = __generic_copy_from_user_intel(to, from, n);
31559 clac();
31560 return n;
31561 }
31562@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31563 if (n > 64 && cpu_has_xmm2)
31564 n = __copy_user_intel_nocache(to, from, n);
31565 else
31566- __copy_user(to, from, n);
31567+ __copy_user(to, from, n, __copyuser_seg, "", "");
31568 #else
31569- __copy_user(to, from, n);
31570+ __copy_user(to, from, n, __copyuser_seg, "", "");
31571 #endif
31572 clac();
31573 return n;
31574 }
31575 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31576
31577-/**
31578- * copy_to_user: - Copy a block of data into user space.
31579- * @to: Destination address, in user space.
31580- * @from: Source address, in kernel space.
31581- * @n: Number of bytes to copy.
31582- *
31583- * Context: User context only. This function may sleep.
31584- *
31585- * Copy data from kernel space to user space.
31586- *
31587- * Returns number of bytes that could not be copied.
31588- * On success, this will be zero.
31589- */
31590-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31591+#ifdef CONFIG_PAX_MEMORY_UDEREF
31592+void __set_fs(mm_segment_t x)
31593 {
31594- if (access_ok(VERIFY_WRITE, to, n))
31595- n = __copy_to_user(to, from, n);
31596- return n;
31597+ switch (x.seg) {
31598+ case 0:
31599+ loadsegment(gs, 0);
31600+ break;
31601+ case TASK_SIZE_MAX:
31602+ loadsegment(gs, __USER_DS);
31603+ break;
31604+ case -1UL:
31605+ loadsegment(gs, __KERNEL_DS);
31606+ break;
31607+ default:
31608+ BUG();
31609+ }
31610 }
31611-EXPORT_SYMBOL(_copy_to_user);
31612+EXPORT_SYMBOL(__set_fs);
31613
31614-/**
31615- * copy_from_user: - Copy a block of data from user space.
31616- * @to: Destination address, in kernel space.
31617- * @from: Source address, in user space.
31618- * @n: Number of bytes to copy.
31619- *
31620- * Context: User context only. This function may sleep.
31621- *
31622- * Copy data from user space to kernel space.
31623- *
31624- * Returns number of bytes that could not be copied.
31625- * On success, this will be zero.
31626- *
31627- * If some data could not be copied, this function will pad the copied
31628- * data to the requested size using zero bytes.
31629- */
31630-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31631+void set_fs(mm_segment_t x)
31632 {
31633- if (access_ok(VERIFY_READ, from, n))
31634- n = __copy_from_user(to, from, n);
31635- else
31636- memset(to, 0, n);
31637- return n;
31638+ current_thread_info()->addr_limit = x;
31639+ __set_fs(x);
31640 }
31641-EXPORT_SYMBOL(_copy_from_user);
31642+EXPORT_SYMBOL(set_fs);
31643+#endif
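
On i386 UDEREF the explicit addr_limit compares disappear from the copy fast paths because the %gs-based segment enforces the boundary in hardware; that is why set_fs() must now reload %gs whenever the limit changes. __set_fs() accepts exactly three limits — 0 (userland fenced off entirely), TASK_SIZE_MAX (normal USER_DS operation) and -1 (KERNEL_DS) — and BUG()s on anything else. A table-style model; the selector values and the TASK_SIZE_MAX constant are assumptions, not taken from the patch:

    #include <stdio.h>
    #include <assert.h>

    enum { SEL_NULL = 0x00, SEL_USER_DS = 0x7b, SEL_KERNEL_DS = 0x68 }; /* illustrative */

    /* mirrors the switch in __set_fs() above */
    static int gs_for_addr_limit(unsigned long seg)
    {
        switch (seg) {
        case 0:                  return SEL_NULL;       /* no userland access */
        case 0xc0000000UL:       return SEL_USER_DS;    /* assumed i386 TASK_SIZE_MAX */
        case (unsigned long)-1:  return SEL_KERNEL_DS;  /* set_fs(KERNEL_DS) */
        default:                 assert(0); return -1;  /* BUG() in the real code */
        }
    }

    int main(void)
    {
        printf("%#x\n", gs_for_addr_limit((unsigned long)-1));
        return 0;
    }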
31644diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31645index 0a42327..7a82465 100644
31646--- a/arch/x86/lib/usercopy_64.c
31647+++ b/arch/x86/lib/usercopy_64.c
31648@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31649 might_fault();
31650 /* no memory constraint because it doesn't change any memory gcc knows
31651 about */
31652+ pax_open_userland();
31653 stac();
31654 asm volatile(
31655 " testq %[size8],%[size8]\n"
31656@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31657 _ASM_EXTABLE(0b,3b)
31658 _ASM_EXTABLE(1b,2b)
31659 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31660- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31661+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31662 [zero] "r" (0UL), [eight] "r" (8UL));
31663 clac();
31664+ pax_close_userland();
31665 return size;
31666 }
31667 EXPORT_SYMBOL(__clear_user);
31668@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31669 }
31670 EXPORT_SYMBOL(clear_user);
31671
31672-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31673+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31674 {
31675- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31676- return copy_user_generic((__force void *)to, (__force void *)from, len);
31677- }
31678- return len;
31679+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31680+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31681+ return len;
31682 }
31683 EXPORT_SYMBOL(copy_in_user);
31684
31685@@ -69,8 +70,10 @@ EXPORT_SYMBOL(copy_in_user);
31686 * it is not necessary to optimize tail handling.
31687 */
31688 __visible unsigned long
31689-copy_user_handle_tail(char *to, char *from, unsigned len)
31690+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
31691 {
31692+ clac();
31693+ pax_close_userland();
31694 for (; len; --len, to++) {
31695 char c;
31696
31697@@ -79,10 +82,9 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
31698 if (__put_user_nocheck(c, to, sizeof(char)))
31699 break;
31700 }
31701- clac();
31702
31703 /* If the destination is a kernel buffer, we always clear the end */
31704- if (!__addr_ok(to))
31705+ if (!__addr_ok(to) && (unsigned long)to >= TASK_SIZE_MAX + pax_user_shadow_base)
31706 memset(to, 0, len);
31707 return len;
31708 }
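
copy_user_handle_tail() now drops SMAP/UDEREF access immediately (clac() and pax_close_userland() moved to the top) and zero-fills the tail only when the destination is demonstrably a kernel buffer — above both the user range and its shadow mapping. A sketch of the reworked guard; the constants and the __addr_ok() stand-in are illustrative:

    #include <stdio.h>

    static const unsigned long task_size_max = 0x00007ffffffff000UL; /* illustrative */
    static const unsigned long shadow_base   = 0x0000100000000000UL; /* illustrative */

    /* mirrors: !__addr_ok(to) && to >= TASK_SIZE_MAX + pax_user_shadow_base */
    static int should_zero_tail(unsigned long to)
    {
        int addr_ok = to < task_size_max;   /* stand-in for __addr_ok(to) */
        return !addr_ok && to >= task_size_max + shadow_base;
    }

    int main(void)
    {
        printf("user ptr: %d, kernel ptr: %d\n",
               should_zero_tail(0x00007f0000000000UL),
               should_zero_tail(0xffff880000000000UL));
        return 0;
    }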
31709diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31710index c4cc740..60a7362 100644
31711--- a/arch/x86/mm/Makefile
31712+++ b/arch/x86/mm/Makefile
31713@@ -35,3 +35,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31714 obj-$(CONFIG_MEMTEST) += memtest.o
31715
31716 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31717+
31718+quote:="
31719+obj-$(CONFIG_X86_64) += uderef_64.o
31720+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31721diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31722index 903ec1e..c4166b2 100644
31723--- a/arch/x86/mm/extable.c
31724+++ b/arch/x86/mm/extable.c
31725@@ -6,12 +6,24 @@
31726 static inline unsigned long
31727 ex_insn_addr(const struct exception_table_entry *x)
31728 {
31729- return (unsigned long)&x->insn + x->insn;
31730+ unsigned long reloc = 0;
31731+
31732+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31733+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31734+#endif
31735+
31736+ return (unsigned long)&x->insn + x->insn + reloc;
31737 }
31738 static inline unsigned long
31739 ex_fixup_addr(const struct exception_table_entry *x)
31740 {
31741- return (unsigned long)&x->fixup + x->fixup;
31742+ unsigned long reloc = 0;
31743+
31744+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31745+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31746+#endif
31747+
31748+ return (unsigned long)&x->fixup + x->fixup + reloc;
31749 }
31750
31751 int fixup_exception(struct pt_regs *regs)
31752@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31753 unsigned long new_ip;
31754
31755 #ifdef CONFIG_PNPBIOS
31756- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31757+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31758 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31759 extern u32 pnp_bios_is_utter_crap;
31760 pnp_bios_is_utter_crap = 1;
31761@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31762 i += 4;
31763 p->fixup -= i;
31764 i += 4;
31765+
31766+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31767+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31768+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31769+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31770+#endif
31771+
31772 }
31773 }
31774
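
The extable helpers decode 32-bit self-relative offsets: each entry stores insn/fixup as deltas from the field's own address. Under i386 KERNEXEC the kernel text is shifted by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR, so ex_insn_addr()/ex_fixup_addr() fold that delta back in and sort_extable() pre-subtracts it; the added BUILD_BUG_ON insists that CONFIG_BUILDTIME_EXTABLE_SORT is enabled in such configurations. The self-relative arithmetic, with the relocation left as a parameter:

    #include <stdint.h>
    #include <stdio.h>

    struct exception_table_entry { int32_t insn; int32_t fixup; };

    /* mirrors ex_insn_addr(): offset is relative to the field's own address;
     * reloc is 0 unless i386 KERNEXEC moved the kernel text */
    static unsigned long ex_insn_addr(const struct exception_table_entry *x, long reloc)
    {
        return (unsigned long)&x->insn + x->insn + reloc;
    }

    int main(void)
    {
        struct exception_table_entry e = { .insn = 0x40, .fixup = 0x80 };
        printf("insn at %#lx (entry at %p)\n", ex_insn_addr(&e, 0), (void *)&e);
        return 0;
    }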
31775diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31776index ede025f..380466b 100644
31777--- a/arch/x86/mm/fault.c
31778+++ b/arch/x86/mm/fault.c
31779@@ -13,12 +13,19 @@
31780 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31781 #include <linux/prefetch.h> /* prefetchw */
31782 #include <linux/context_tracking.h> /* exception_enter(), ... */
31783+#include <linux/unistd.h>
31784+#include <linux/compiler.h>
31785
31786 #include <asm/traps.h> /* dotraplinkage, ... */
31787 #include <asm/pgalloc.h> /* pgd_*(), ... */
31788 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31789 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31790 #include <asm/vsyscall.h> /* emulate_vsyscall */
31791+#include <asm/tlbflush.h>
31792+
31793+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31794+#include <asm/stacktrace.h>
31795+#endif
31796
31797 #define CREATE_TRACE_POINTS
31798 #include <asm/trace/exceptions.h>
31799@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31800 int ret = 0;
31801
31802 /* kprobe_running() needs smp_processor_id() */
31803- if (kprobes_built_in() && !user_mode_vm(regs)) {
31804+ if (kprobes_built_in() && !user_mode(regs)) {
31805 preempt_disable();
31806 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31807 ret = 1;
31808@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31809 return !instr_lo || (instr_lo>>1) == 1;
31810 case 0x00:
31811 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31812- if (probe_kernel_address(instr, opcode))
31813+ if (user_mode(regs)) {
31814+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31815+ return 0;
31816+ } else if (probe_kernel_address(instr, opcode))
31817 return 0;
31818
31819 *prefetch = (instr_lo == 0xF) &&
31820@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31821 while (instr < max_instr) {
31822 unsigned char opcode;
31823
31824- if (probe_kernel_address(instr, opcode))
31825+ if (user_mode(regs)) {
31826+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31827+ break;
31828+ } else if (probe_kernel_address(instr, opcode))
31829 break;
31830
31831 instr++;
31832@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31833 force_sig_info(si_signo, &info, tsk);
31834 }
31835
31836+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31837+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31838+#endif
31839+
31840+#ifdef CONFIG_PAX_EMUTRAMP
31841+static int pax_handle_fetch_fault(struct pt_regs *regs);
31842+#endif
31843+
31844+#ifdef CONFIG_PAX_PAGEEXEC
31845+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31846+{
31847+ pgd_t *pgd;
31848+ pud_t *pud;
31849+ pmd_t *pmd;
31850+
31851+ pgd = pgd_offset(mm, address);
31852+ if (!pgd_present(*pgd))
31853+ return NULL;
31854+ pud = pud_offset(pgd, address);
31855+ if (!pud_present(*pud))
31856+ return NULL;
31857+ pmd = pmd_offset(pud, address);
31858+ if (!pmd_present(*pmd))
31859+ return NULL;
31860+ return pmd;
31861+}
31862+#endif
31863+
31864 DEFINE_SPINLOCK(pgd_lock);
31865 LIST_HEAD(pgd_list);
31866
31867@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31868 for (address = VMALLOC_START & PMD_MASK;
31869 address >= TASK_SIZE && address < FIXADDR_TOP;
31870 address += PMD_SIZE) {
31871+
31872+#ifdef CONFIG_PAX_PER_CPU_PGD
31873+ unsigned long cpu;
31874+#else
31875 struct page *page;
31876+#endif
31877
31878 spin_lock(&pgd_lock);
31879+
31880+#ifdef CONFIG_PAX_PER_CPU_PGD
31881+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31882+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31883+ pmd_t *ret;
31884+
31885+ ret = vmalloc_sync_one(pgd, address);
31886+ if (!ret)
31887+ break;
31888+ pgd = get_cpu_pgd(cpu, kernel);
31889+#else
31890 list_for_each_entry(page, &pgd_list, lru) {
31891+ pgd_t *pgd;
31892 spinlock_t *pgt_lock;
31893 pmd_t *ret;
31894
31895@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31896 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31897
31898 spin_lock(pgt_lock);
31899- ret = vmalloc_sync_one(page_address(page), address);
31900+ pgd = page_address(page);
31901+#endif
31902+
31903+ ret = vmalloc_sync_one(pgd, address);
31904+
31905+#ifndef CONFIG_PAX_PER_CPU_PGD
31906 spin_unlock(pgt_lock);
31907+#endif
31908
31909 if (!ret)
31910 break;
31911@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31912 * an interrupt in the middle of a task switch..
31913 */
31914 pgd_paddr = read_cr3();
31915+
31916+#ifdef CONFIG_PAX_PER_CPU_PGD
31917+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31918+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31919+#endif
31920+
31921 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31922 if (!pmd_k)
31923 return -1;
31924@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31925 * happen within a race in page table update. In the later
31926 * case just flush:
31927 */
31928- pgd = pgd_offset(current->active_mm, address);
31929+
31930 pgd_ref = pgd_offset_k(address);
31931 if (pgd_none(*pgd_ref))
31932 return -1;
31933
31934+#ifdef CONFIG_PAX_PER_CPU_PGD
31935+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31936+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31937+ if (pgd_none(*pgd)) {
31938+ set_pgd(pgd, *pgd_ref);
31939+ arch_flush_lazy_mmu_mode();
31940+ } else {
31941+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31942+ }
31943+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31944+#else
31945+ pgd = pgd_offset(current->active_mm, address);
31946+#endif
31947+
31948 if (pgd_none(*pgd)) {
31949 set_pgd(pgd, *pgd_ref);
31950 arch_flush_lazy_mmu_mode();
31951@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31952 static int is_errata100(struct pt_regs *regs, unsigned long address)
31953 {
31954 #ifdef CONFIG_X86_64
31955- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31956+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31957 return 1;
31958 #endif
31959 return 0;
31960@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31961 }
31962
31963 static const char nx_warning[] = KERN_CRIT
31964-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31965+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31966 static const char smep_warning[] = KERN_CRIT
31967-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31968+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31969
31970 static void
31971 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31972@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31973 if (!oops_may_print())
31974 return;
31975
31976- if (error_code & PF_INSTR) {
31977+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31978 unsigned int level;
31979 pgd_t *pgd;
31980 pte_t *pte;
31981@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31982 pte = lookup_address_in_pgd(pgd, address, &level);
31983
31984 if (pte && pte_present(*pte) && !pte_exec(*pte))
31985- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31986+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31987 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31988 (pgd_flags(*pgd) & _PAGE_USER) &&
31989 (__read_cr4() & X86_CR4_SMEP))
31990- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31991+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31992 }
31993
31994+#ifdef CONFIG_PAX_KERNEXEC
31995+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31996+ if (current->signal->curr_ip)
31997+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31998+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31999+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32000+ else
32001+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32002+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32003+ }
32004+#endif
32005+
32006 printk(KERN_ALERT "BUG: unable to handle kernel ");
32007 if (address < PAGE_SIZE)
32008 printk(KERN_CONT "NULL pointer dereference");
32009@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32010 return;
32011 }
32012 #endif
32013+
32014+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32015+ if (pax_is_fetch_fault(regs, error_code, address)) {
32016+
32017+#ifdef CONFIG_PAX_EMUTRAMP
32018+ switch (pax_handle_fetch_fault(regs)) {
32019+ case 2:
32020+ return;
32021+ }
32022+#endif
32023+
32024+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32025+ do_group_exit(SIGKILL);
32026+ }
32027+#endif
32028+
32029 /* Kernel addresses are always protection faults: */
32030 if (address >= TASK_SIZE)
32031 error_code |= PF_PROT;
32032@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32033 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32034 printk(KERN_ERR
32035 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32036- tsk->comm, tsk->pid, address);
32037+ tsk->comm, task_pid_nr(tsk), address);
32038 code = BUS_MCEERR_AR;
32039 }
32040 #endif
32041@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32042 return 1;
32043 }
32044
32045+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32046+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32047+{
32048+ pte_t *pte;
32049+ pmd_t *pmd;
32050+ spinlock_t *ptl;
32051+ unsigned char pte_mask;
32052+
32053+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32054+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32055+ return 0;
32056+
32057+ /* PaX: it's our fault, let's handle it if we can */
32058+
32059+ /* PaX: take a look at read faults before acquiring any locks */
32060+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32061+ /* instruction fetch attempt from a protected page in user mode */
32062+ up_read(&mm->mmap_sem);
32063+
32064+#ifdef CONFIG_PAX_EMUTRAMP
32065+ switch (pax_handle_fetch_fault(regs)) {
32066+ case 2:
32067+ return 1;
32068+ }
32069+#endif
32070+
32071+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32072+ do_group_exit(SIGKILL);
32073+ }
32074+
32075+ pmd = pax_get_pmd(mm, address);
32076+ if (unlikely(!pmd))
32077+ return 0;
32078+
32079+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32080+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32081+ pte_unmap_unlock(pte, ptl);
32082+ return 0;
32083+ }
32084+
32085+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32086+ /* write attempt to a protected page in user mode */
32087+ pte_unmap_unlock(pte, ptl);
32088+ return 0;
32089+ }
32090+
32091+#ifdef CONFIG_SMP
32092+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32093+#else
32094+ if (likely(address > get_limit(regs->cs)))
32095+#endif
32096+ {
32097+ set_pte(pte, pte_mkread(*pte));
32098+ __flush_tlb_one(address);
32099+ pte_unmap_unlock(pte, ptl);
32100+ up_read(&mm->mmap_sem);
32101+ return 1;
32102+ }
32103+
32104+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32105+
32106+ /*
32107+ * PaX: fill DTLB with user rights and retry
32108+ */
32109+ __asm__ __volatile__ (
32110+ "orb %2,(%1)\n"
32111+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32112+/*
32113+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32114+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32115+ * page fault when examined during a TLB load attempt. this is true not only
32116+ * for PTEs holding a non-present entry but also present entries that will
32117+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32118+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32119+ * for our target pages since their PTEs are simply not in the TLBs at all.
32120+ *
32121+ * the best thing about omitting it is that we gain around 15-20% speed in the
32122+ * fast path of the page fault handler and can get rid of tracing since we
32123+ * can no longer flush unintended entries.
32124+ */
32125+ "invlpg (%0)\n"
32126+#endif
32127+ __copyuser_seg"testb $0,(%0)\n"
32128+ "xorb %3,(%1)\n"
32129+ :
32130+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32131+ : "memory", "cc");
32132+ pte_unmap_unlock(pte, ptl);
32133+ up_read(&mm->mmap_sem);
32134+ return 1;
32135+}
32136+#endif
32137+
32138 /*
32139 * Handle a spurious fault caused by a stale TLB entry.
32140 *
32141@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32142 static inline int
32143 access_error(unsigned long error_code, struct vm_area_struct *vma)
32144 {
32145+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32146+ return 1;
32147+
32148 if (error_code & PF_WRITE) {
32149 /* write, present and write, not present: */
32150 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32151@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32152 if (error_code & PF_USER)
32153 return false;
32154
32155- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32156+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32157 return false;
32158
32159 return true;
32160@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32161 tsk = current;
32162 mm = tsk->mm;
32163
32164+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32165+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32166+ if (!search_exception_tables(regs->ip)) {
32167+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32168+ bad_area_nosemaphore(regs, error_code, address);
32169+ return;
32170+ }
32171+ if (address < pax_user_shadow_base) {
32172+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32173+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32174+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32175+ } else
32176+ address -= pax_user_shadow_base;
32177+ }
32178+#endif
32179+
32180 /*
32181 * Detect and handle instructions that would cause a page fault for
32182 * both a tracked kernel page and a userspace page.
32183@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32184 * User-mode registers count as a user access even for any
32185 * potential system fault or CPU buglet:
32186 */
32187- if (user_mode_vm(regs)) {
32188+ if (user_mode(regs)) {
32189 local_irq_enable();
32190 error_code |= PF_USER;
32191 flags |= FAULT_FLAG_USER;
32192@@ -1187,6 +1411,11 @@ retry:
32193 might_sleep();
32194 }
32195
32196+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32197+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32198+ return;
32199+#endif
32200+
32201 vma = find_vma(mm, address);
32202 if (unlikely(!vma)) {
32203 bad_area(regs, error_code, address);
32204@@ -1198,18 +1427,24 @@ retry:
32205 bad_area(regs, error_code, address);
32206 return;
32207 }
32208- if (error_code & PF_USER) {
32209- /*
32210- * Accessing the stack below %sp is always a bug.
32211- * The large cushion allows instructions like enter
32212- * and pusha to work. ("enter $65535, $31" pushes
32213- * 32 pointers and then decrements %sp by 65535.)
32214- */
32215- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32216- bad_area(regs, error_code, address);
32217- return;
32218- }
32219+ /*
32220+ * Accessing the stack below %sp is always a bug.
32221+ * The large cushion allows instructions like enter
32222+ * and pusha to work. ("enter $65535, $31" pushes
32223+ * 32 pointers and then decrements %sp by 65535.)
32224+ */
32225+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32226+ bad_area(regs, error_code, address);
32227+ return;
32228 }
32229+
32230+#ifdef CONFIG_PAX_SEGMEXEC
32231+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32232+ bad_area(regs, error_code, address);
32233+ return;
32234+ }
32235+#endif
32236+
32237 if (unlikely(expand_stack(vma, address))) {
32238 bad_area(regs, error_code, address);
32239 return;
32240@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32241 }
32242 NOKPROBE_SYMBOL(trace_do_page_fault);
32243 #endif /* CONFIG_TRACING */
32244+
32245+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32246+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32247+{
32248+ struct mm_struct *mm = current->mm;
32249+ unsigned long ip = regs->ip;
32250+
32251+ if (v8086_mode(regs))
32252+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32253+
32254+#ifdef CONFIG_PAX_PAGEEXEC
32255+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32256+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32257+ return true;
32258+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32259+ return true;
32260+ return false;
32261+ }
32262+#endif
32263+
32264+#ifdef CONFIG_PAX_SEGMEXEC
32265+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32266+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32267+ return true;
32268+ return false;
32269+ }
32270+#endif
32271+
32272+ return false;
32273+}
32274+#endif
32275+
32276+#ifdef CONFIG_PAX_EMUTRAMP
32277+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32278+{
32279+ int err;
32280+
32281+ do { /* PaX: libffi trampoline emulation */
32282+ unsigned char mov, jmp;
32283+ unsigned int addr1, addr2;
32284+
32285+#ifdef CONFIG_X86_64
32286+ if ((regs->ip + 9) >> 32)
32287+ break;
32288+#endif
32289+
32290+ err = get_user(mov, (unsigned char __user *)regs->ip);
32291+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32292+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32293+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32294+
32295+ if (err)
32296+ break;
32297+
32298+ if (mov == 0xB8 && jmp == 0xE9) {
32299+ regs->ax = addr1;
32300+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32301+ return 2;
32302+ }
32303+ } while (0);
32304+
32305+ do { /* PaX: gcc trampoline emulation #1 */
32306+ unsigned char mov1, mov2;
32307+ unsigned short jmp;
32308+ unsigned int addr1, addr2;
32309+
32310+#ifdef CONFIG_X86_64
32311+ if ((regs->ip + 11) >> 32)
32312+ break;
32313+#endif
32314+
32315+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32316+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32317+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32318+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32319+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32320+
32321+ if (err)
32322+ break;
32323+
32324+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32325+ regs->cx = addr1;
32326+ regs->ax = addr2;
32327+ regs->ip = addr2;
32328+ return 2;
32329+ }
32330+ } while (0);
32331+
32332+ do { /* PaX: gcc trampoline emulation #2 */
32333+ unsigned char mov, jmp;
32334+ unsigned int addr1, addr2;
32335+
32336+#ifdef CONFIG_X86_64
32337+ if ((regs->ip + 9) >> 32)
32338+ break;
32339+#endif
32340+
32341+ err = get_user(mov, (unsigned char __user *)regs->ip);
32342+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32343+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32344+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32345+
32346+ if (err)
32347+ break;
32348+
32349+ if (mov == 0xB9 && jmp == 0xE9) {
32350+ regs->cx = addr1;
32351+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32352+ return 2;
32353+ }
32354+ } while (0);
32355+
32356+ return 1; /* PaX in action */
32357+}
32358+
32359+#ifdef CONFIG_X86_64
32360+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32361+{
32362+ int err;
32363+
32364+ do { /* PaX: libffi trampoline emulation */
32365+ unsigned short mov1, mov2, jmp1;
32366+ unsigned char stcclc, jmp2;
32367+ unsigned long addr1, addr2;
32368+
32369+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32370+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32371+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32372+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32373+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32374+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32375+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32376+
32377+ if (err)
32378+ break;
32379+
32380+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32381+ regs->r11 = addr1;
32382+ regs->r10 = addr2;
32383+ if (stcclc == 0xF8)
32384+ regs->flags &= ~X86_EFLAGS_CF;
32385+ else
32386+ regs->flags |= X86_EFLAGS_CF;
32387+ regs->ip = addr1;
32388+ return 2;
32389+ }
32390+ } while (0);
32391+
32392+ do { /* PaX: gcc trampoline emulation #1 */
32393+ unsigned short mov1, mov2, jmp1;
32394+ unsigned char jmp2;
32395+ unsigned int addr1;
32396+ unsigned long addr2;
32397+
32398+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32399+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32400+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32401+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32402+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32403+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32404+
32405+ if (err)
32406+ break;
32407+
32408+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32409+ regs->r11 = addr1;
32410+ regs->r10 = addr2;
32411+ regs->ip = addr1;
32412+ return 2;
32413+ }
32414+ } while (0);
32415+
32416+ do { /* PaX: gcc trampoline emulation #2 */
32417+ unsigned short mov1, mov2, jmp1;
32418+ unsigned char jmp2;
32419+ unsigned long addr1, addr2;
32420+
32421+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32422+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32423+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32424+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32425+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32426+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32427+
32428+ if (err)
32429+ break;
32430+
32431+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32432+ regs->r11 = addr1;
32433+ regs->r10 = addr2;
32434+ regs->ip = addr1;
32435+ return 2;
32436+ }
32437+ } while (0);
32438+
32439+ return 1; /* PaX in action */
32440+}
32441+#endif
32442+
32443+/*
32444+ * PaX: decide what to do with offenders (regs->ip = fault address)
32445+ *
32446+ * returns 1 when task should be killed
32447+ * 2 when gcc trampoline was detected
32448+ */
32449+static int pax_handle_fetch_fault(struct pt_regs *regs)
32450+{
32451+ if (v8086_mode(regs))
32452+ return 1;
32453+
32454+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32455+ return 1;
32456+
32457+#ifdef CONFIG_X86_32
32458+ return pax_handle_fetch_fault_32(regs);
32459+#else
32460+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32461+ return pax_handle_fetch_fault_32(regs);
32462+ else
32463+ return pax_handle_fetch_fault_64(regs);
32464+#endif
32465+}
32466+#endif
32467+
32468+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32469+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32470+{
32471+ long i;
32472+
32473+ printk(KERN_ERR "PAX: bytes at PC: ");
32474+ for (i = 0; i < 20; i++) {
32475+ unsigned char c;
32476+ if (get_user(c, (unsigned char __force_user *)pc+i))
32477+ printk(KERN_CONT "?? ");
32478+ else
32479+ printk(KERN_CONT "%02x ", c);
32480+ }
32481+ printk("\n");
32482+
32483+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32484+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32485+ unsigned long c;
32486+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32487+#ifdef CONFIG_X86_32
32488+ printk(KERN_CONT "???????? ");
32489+#else
32490+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32491+ printk(KERN_CONT "???????? ???????? ");
32492+ else
32493+ printk(KERN_CONT "???????????????? ");
32494+#endif
32495+ } else {
32496+#ifdef CONFIG_X86_64
32497+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32498+ printk(KERN_CONT "%08x ", (unsigned int)c);
32499+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32500+ } else
32501+#endif
32502+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32503+ }
32504+ }
32505+ printk("\n");
32506+}
32507+#endif
32508+
32509+/**
32510+ * probe_kernel_write(): safely attempt to write to a location
32511+ * @dst: address to write to
32512+ * @src: pointer to the data that shall be written
32513+ * @size: size of the data chunk
32514+ *
32515+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32516+ * happens, handle that and return -EFAULT.
32517+ */
32518+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32519+{
32520+ long ret;
32521+ mm_segment_t old_fs = get_fs();
32522+
32523+ set_fs(KERNEL_DS);
32524+ pagefault_disable();
32525+ pax_open_kernel();
32526+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32527+ pax_close_kernel();
32528+ pagefault_enable();
32529+ set_fs(old_fs);
32530+
32531+ return ret ? -EFAULT : 0;
32532+}
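
The fault.c changes above key almost everything off individual bits of the x86 page-fault hardware error code (PF_PROT, PF_WRITE, PF_USER, PF_INSTR), e.g. in access_error() and pax_is_fetch_fault(). A minimal user-space sketch of how those bits decode; the bit positions are the architectural ones, the decode() helper is illustrative only:

#include <stdio.h>

#define PF_PROT  (1 << 0)  /* 0: not-present page, 1: protection violation */
#define PF_WRITE (1 << 1)  /* 0: read access,      1: write access */
#define PF_USER  (1 << 2)  /* 0: kernel mode,      1: user mode */
#define PF_RSVD  (1 << 3)  /* reserved bit was set in a paging entry */
#define PF_INSTR (1 << 4)  /* fault was an instruction fetch (NX) */

static void decode(unsigned long ec)
{
	printf("%#04lx: %s %s from %s mode%s\n", ec,
	       ec & PF_PROT ? "protection" : "not-present",
	       ec & PF_INSTR ? "fetch" : (ec & PF_WRITE ? "write" : "read"),
	       ec & PF_USER ? "user" : "kernel",
	       ec & PF_RSVD ? " (reserved bit)" : "");
}

int main(void)
{
	decode(PF_PROT | PF_USER | PF_INSTR); /* NX fetch: the PAGEEXEC case */
	decode(PF_USER);                      /* user read of a not-present page */
	decode(PF_PROT | PF_WRITE);           /* kernel write protection fault */
	return 0;
}
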
32533diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32534index 81bf3d2..7ef25c2 100644
32535--- a/arch/x86/mm/gup.c
32536+++ b/arch/x86/mm/gup.c
32537@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32538 addr = start;
32539 len = (unsigned long) nr_pages << PAGE_SHIFT;
32540 end = start + len;
32541- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32542+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32543 (void __user *)start, len)))
32544 return 0;
32545
32546@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32547 goto slow_irqon;
32548 #endif
32549
32550+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32551+ (void __user *)start, len)))
32552+ return 0;
32553+
32554 /*
32555 * XXX: batch / limit 'nr', to avoid large irq off latency
32556 * needs some instrumenting to determine the common sizes used by
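
The added access_ok_noprefault() call closes the window where get_user_pages_fast() walked page tables before validating the range at all; access_ok_noprefault is the patch's own variant of access_ok and its definition lies outside this hunk. Whatever the variant does, the essential property of any such range check is overflow safety: "start + len" must not be allowed to wrap. A minimal sketch, with range_ok and the 3G limit as illustrative assumptions rather than kernel API:

#include <assert.h>
#include <limits.h>

static int range_ok(unsigned long start, unsigned long len,
                    unsigned long limit)
{
	/* reject wraparound first, then compare against the limit */
	return len <= limit && start <= limit - len;
}

int main(void)
{
	const unsigned long limit = 0xc0000000UL;     /* e.g. a 3G/1G split */

	assert(range_ok(0x1000, 0x1000, limit));      /* ordinary range */
	assert(!range_ok(limit - 1, 0x1000, limit));  /* crosses the limit */
	assert(!range_ok(ULONG_MAX - 4, 64, limit));  /* start + len wraps */
	return 0;
}
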
32557diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32558index 4500142..53a363c 100644
32559--- a/arch/x86/mm/highmem_32.c
32560+++ b/arch/x86/mm/highmem_32.c
32561@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32562 idx = type + KM_TYPE_NR*smp_processor_id();
32563 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32564 BUG_ON(!pte_none(*(kmap_pte-idx)));
32565+
32566+ pax_open_kernel();
32567 set_pte(kmap_pte-idx, mk_pte(page, prot));
32568+ pax_close_kernel();
32569+
32570 arch_flush_lazy_mmu_mode();
32571
32572 return (void *)vaddr;
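
This hunk (and the matching one in iomap_32.c below) wraps set_pte() in pax_open_kernel()/pax_close_kernel(), the KERNEXEC primitive that briefly permits writes to otherwise read-only kernel data; on i386 this is typically done by toggling CR0.WP, though the exact mechanism is config-dependent. A user-space analogue of the open-write-close bracket, using mprotect() purely as an analogy:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "initial");
	mprotect(p, pagesz, PROT_READ);               /* steady state: read-only */

	mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel" */
	strcpy(p, "updated");                         /* the one sanctioned write */
	mprotect(p, pagesz, PROT_READ);               /* "pax_close_kernel" */

	printf("%s\n", p);
	return 0;
}
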
32573diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32574index 42982b2..7168fc3 100644
32575--- a/arch/x86/mm/hugetlbpage.c
32576+++ b/arch/x86/mm/hugetlbpage.c
32577@@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
32578 #ifdef CONFIG_HUGETLB_PAGE
32579 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32580 unsigned long addr, unsigned long len,
32581- unsigned long pgoff, unsigned long flags)
32582+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32583 {
32584 struct hstate *h = hstate_file(file);
32585 struct vm_unmapped_area_info info;
32586-
32587+
32588 info.flags = 0;
32589 info.length = len;
32590 info.low_limit = current->mm->mmap_legacy_base;
32591 info.high_limit = TASK_SIZE;
32592 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32593 info.align_offset = 0;
32594+ info.threadstack_offset = offset;
32595 return vm_unmapped_area(&info);
32596 }
32597
32598 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32599 unsigned long addr0, unsigned long len,
32600- unsigned long pgoff, unsigned long flags)
32601+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32602 {
32603 struct hstate *h = hstate_file(file);
32604 struct vm_unmapped_area_info info;
32605@@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32606 info.high_limit = current->mm->mmap_base;
32607 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32608 info.align_offset = 0;
32609+ info.threadstack_offset = offset;
32610 addr = vm_unmapped_area(&info);
32611
32612 /*
32613@@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32614 VM_BUG_ON(addr != -ENOMEM);
32615 info.flags = 0;
32616 info.low_limit = TASK_UNMAPPED_BASE;
32617+
32618+#ifdef CONFIG_PAX_RANDMMAP
32619+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32620+ info.low_limit += current->mm->delta_mmap;
32621+#endif
32622+
32623 info.high_limit = TASK_SIZE;
32624 addr = vm_unmapped_area(&info);
32625 }
32626@@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32627 struct hstate *h = hstate_file(file);
32628 struct mm_struct *mm = current->mm;
32629 struct vm_area_struct *vma;
32630+ unsigned long pax_task_size = TASK_SIZE;
32631+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32632
32633 if (len & ~huge_page_mask(h))
32634 return -EINVAL;
32635- if (len > TASK_SIZE)
32636+
32637+#ifdef CONFIG_PAX_SEGMEXEC
32638+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32639+ pax_task_size = SEGMEXEC_TASK_SIZE;
32640+#endif
32641+
32642+ pax_task_size -= PAGE_SIZE;
32643+
32644+ if (len > pax_task_size)
32645 return -ENOMEM;
32646
32647 if (flags & MAP_FIXED) {
32648@@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32649 return addr;
32650 }
32651
32652+#ifdef CONFIG_PAX_RANDMMAP
32653+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32654+#endif
32655+
32656 if (addr) {
32657 addr = ALIGN(addr, huge_page_size(h));
32658 vma = find_vma(mm, addr);
32659- if (TASK_SIZE - len >= addr &&
32660- (!vma || addr + len <= vma->vm_start))
32661+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32662 return addr;
32663 }
32664 if (mm->get_unmapped_area == arch_get_unmapped_area)
32665 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32666- pgoff, flags);
32667+ pgoff, flags, offset);
32668 else
32669 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32670- pgoff, flags);
32671+ pgoff, flags, offset);
32672 }
32673 #endif /* CONFIG_HUGETLB_PAGE */
32674
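
The hugetlb placement helpers now thread a threadstack_offset into vm_unmapped_area(), grsecurity's randomized extra gap relative to thread stacks (per gr_rand_threadstack_offset()); the huge-page alignment itself still comes from align_mask. A sketch of just the top-down, huge-page-aligned placement arithmetic; the 2 MiB page size and all addresses are illustrative:

#include <stdio.h>

#define HPAGE_SIZE (2UL << 20)

static unsigned long topdown_fit(unsigned long low, unsigned long high,
                                 unsigned long len)
{
	if (len > high - low)
		return 0;                    /* no room at all */
	/* highest start keeping [start, start+len) below high,
	 * rounded down to a huge-page boundary */
	unsigned long start = (high - len) & ~(HPAGE_SIZE - 1);
	return start >= low ? start : 0;
}

int main(void)
{
	unsigned long a = topdown_fit(0x10000000UL, 0x7f5a123000UL,
	                              4 * HPAGE_SIZE);
	printf("%#lx\n", a);   /* 2 MiB aligned, just below the limit */
	return 0;
}
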
32675diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32676index a110efc..a31a18f 100644
32677--- a/arch/x86/mm/init.c
32678+++ b/arch/x86/mm/init.c
32679@@ -4,6 +4,7 @@
32680 #include <linux/swap.h>
32681 #include <linux/memblock.h>
32682 #include <linux/bootmem.h> /* for max_low_pfn */
32683+#include <linux/tboot.h>
32684
32685 #include <asm/cacheflush.h>
32686 #include <asm/e820.h>
32687@@ -17,6 +18,8 @@
32688 #include <asm/proto.h>
32689 #include <asm/dma.h> /* for MAX_DMA_PFN */
32690 #include <asm/microcode.h>
32691+#include <asm/desc.h>
32692+#include <asm/bios_ebda.h>
32693
32694 /*
32695 * We need to define the tracepoints somewhere, and tlb.c
32696@@ -620,7 +623,18 @@ void __init init_mem_mapping(void)
32697 early_ioremap_page_table_range_init();
32698 #endif
32699
32700+#ifdef CONFIG_PAX_PER_CPU_PGD
32701+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32702+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32703+ KERNEL_PGD_PTRS);
32704+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32705+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32706+ KERNEL_PGD_PTRS);
32707+ load_cr3(get_cpu_pgd(0, kernel));
32708+#else
32709 load_cr3(swapper_pg_dir);
32710+#endif
32711+
32712 __flush_tlb_all();
32713
32714 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32715@@ -636,10 +650,40 @@ void __init init_mem_mapping(void)
32716 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32717 * mmio resources as well as potential bios/acpi data regions.
32718 */
32719+
32720+#ifdef CONFIG_GRKERNSEC_KMEM
32721+static unsigned int ebda_start __read_only;
32722+static unsigned int ebda_end __read_only;
32723+#endif
32724+
32725 int devmem_is_allowed(unsigned long pagenr)
32726 {
32727- if (pagenr < 256)
32728+#ifdef CONFIG_GRKERNSEC_KMEM
32729+ /* allow BDA */
32730+ if (!pagenr)
32731 return 1;
32732+ /* allow EBDA */
32733+ if (pagenr >= ebda_start && pagenr < ebda_end)
32734+ return 1;
32735+ /* if tboot is in use, allow access to its hardcoded serial log range */
32736+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32737+ return 1;
32738+#else
32739+ if (!pagenr)
32740+ return 1;
32741+#ifdef CONFIG_VM86
32742+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32743+ return 1;
32744+#endif
32745+#endif
32746+
32747+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32748+ return 1;
32749+#ifdef CONFIG_GRKERNSEC_KMEM
32750+ /* throw out everything else below 1MB */
32751+ if (pagenr <= 256)
32752+ return 0;
32753+#endif
32754 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32755 return 0;
32756 if (!page_is_ram(pagenr))
32757@@ -685,8 +729,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32758 #endif
32759 }
32760
32761+#ifdef CONFIG_GRKERNSEC_KMEM
32762+static inline void gr_init_ebda(void)
32763+{
32764+ unsigned int ebda_addr;
32765+ unsigned int ebda_size = 0;
32766+
32767+ ebda_addr = get_bios_ebda();
32768+ if (ebda_addr) {
32769+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32770+ ebda_size <<= 10;
32771+ }
32772+ if (ebda_addr && ebda_size) {
32773+ ebda_start = ebda_addr >> PAGE_SHIFT;
32774+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32775+ } else {
32776+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32777+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32778+ }
32779+}
32780+#else
32781+static inline void gr_init_ebda(void) { }
32782+#endif
32783+
32784 void free_initmem(void)
32785 {
32786+#ifdef CONFIG_PAX_KERNEXEC
32787+#ifdef CONFIG_X86_32
32788+ /* PaX: limit KERNEL_CS to actual size */
32789+ unsigned long addr, limit;
32790+ struct desc_struct d;
32791+ int cpu;
32792+#else
32793+ pgd_t *pgd;
32794+ pud_t *pud;
32795+ pmd_t *pmd;
32796+ unsigned long addr, end;
32797+#endif
32798+#endif
32799+
32800+ gr_init_ebda();
32801+
32802+#ifdef CONFIG_PAX_KERNEXEC
32803+#ifdef CONFIG_X86_32
32804+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32805+ limit = (limit - 1UL) >> PAGE_SHIFT;
32806+
32807+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32808+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32809+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32810+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32811+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32812+ }
32813+
32814+ /* PaX: make KERNEL_CS read-only */
32815+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32816+ if (!paravirt_enabled())
32817+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32818+/*
32819+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32820+ pgd = pgd_offset_k(addr);
32821+ pud = pud_offset(pgd, addr);
32822+ pmd = pmd_offset(pud, addr);
32823+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32824+ }
32825+*/
32826+#ifdef CONFIG_X86_PAE
32827+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32828+/*
32829+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32830+ pgd = pgd_offset_k(addr);
32831+ pud = pud_offset(pgd, addr);
32832+ pmd = pmd_offset(pud, addr);
32833+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32834+ }
32835+*/
32836+#endif
32837+
32838+#ifdef CONFIG_MODULES
32839+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32840+#endif
32841+
32842+#else
32843+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32844+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32845+ pgd = pgd_offset_k(addr);
32846+ pud = pud_offset(pgd, addr);
32847+ pmd = pmd_offset(pud, addr);
32848+ if (!pmd_present(*pmd))
32849+ continue;
32850+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32851+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32852+ else
32853+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32854+ }
32855+
32856+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32857+ end = addr + KERNEL_IMAGE_SIZE;
32858+ for (; addr < end; addr += PMD_SIZE) {
32859+ pgd = pgd_offset_k(addr);
32860+ pud = pud_offset(pgd, addr);
32861+ pmd = pmd_offset(pud, addr);
32862+ if (!pmd_present(*pmd))
32863+ continue;
32864+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32865+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32866+ }
32867+#endif
32868+
32869+ flush_tlb_all();
32870+#endif
32871+
32872 free_init_pages("unused kernel",
32873 (unsigned long)(&__init_begin),
32874 (unsigned long)(&__init_end));
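
gr_init_ebda() caches the Extended BIOS Data Area page range so that devmem_is_allowed() can whitelist it under GRKERNSEC_KMEM: the BIOS Data Area word at physical 0x40E holds the EBDA segment, and the first byte of the EBDA is its size in KiB. The same arithmetic as a standalone sketch; the segment and size values are made up and no /dev/mem is touched:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int ebda_seg  = 0x9FC0;             /* assumed BDA 0x40E value */
	unsigned int ebda_addr = ebda_seg << 4;      /* 0x9FC00 */
	unsigned int ebda_kib  = 1;                  /* assumed first EBDA byte */
	unsigned int ebda_size = ebda_kib << 10;

	unsigned int start = ebda_addr >> PAGE_SHIFT;
	unsigned int end   = PAGE_ALIGN(ebda_addr + ebda_size) >> PAGE_SHIFT;
	if (end > (0xa0000 >> PAGE_SHIFT))
		end = 0xa0000 >> PAGE_SHIFT;         /* clamp below video RAM */

	printf("EBDA pages: %#x-%#x\n", start, end); /* 0x9f-0xa0 */
	return 0;
}
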
32875diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32876index c8140e1..59257fc 100644
32877--- a/arch/x86/mm/init_32.c
32878+++ b/arch/x86/mm/init_32.c
32879@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32880 bool __read_mostly __vmalloc_start_set = false;
32881
32882 /*
32883- * Creates a middle page table and puts a pointer to it in the
32884- * given global directory entry. This only returns the gd entry
32885- * in non-PAE compilation mode, since the middle layer is folded.
32886- */
32887-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32888-{
32889- pud_t *pud;
32890- pmd_t *pmd_table;
32891-
32892-#ifdef CONFIG_X86_PAE
32893- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32894- pmd_table = (pmd_t *)alloc_low_page();
32895- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32896- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32897- pud = pud_offset(pgd, 0);
32898- BUG_ON(pmd_table != pmd_offset(pud, 0));
32899-
32900- return pmd_table;
32901- }
32902-#endif
32903- pud = pud_offset(pgd, 0);
32904- pmd_table = pmd_offset(pud, 0);
32905-
32906- return pmd_table;
32907-}
32908-
32909-/*
32910 * Create a page table and place a pointer to it in a middle page
32911 * directory entry:
32912 */
32913@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32914 pte_t *page_table = (pte_t *)alloc_low_page();
32915
32916 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32917+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32918+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32919+#else
32920 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32921+#endif
32922 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32923 }
32924
32925 return pte_offset_kernel(pmd, 0);
32926 }
32927
32928+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32929+{
32930+ pud_t *pud;
32931+ pmd_t *pmd_table;
32932+
32933+ pud = pud_offset(pgd, 0);
32934+ pmd_table = pmd_offset(pud, 0);
32935+
32936+ return pmd_table;
32937+}
32938+
32939 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32940 {
32941 int pgd_idx = pgd_index(vaddr);
32942@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32943 int pgd_idx, pmd_idx;
32944 unsigned long vaddr;
32945 pgd_t *pgd;
32946+ pud_t *pud;
32947 pmd_t *pmd;
32948 pte_t *pte = NULL;
32949 unsigned long count = page_table_range_init_count(start, end);
32950@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32951 pgd = pgd_base + pgd_idx;
32952
32953 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32954- pmd = one_md_table_init(pgd);
32955- pmd = pmd + pmd_index(vaddr);
32956+ pud = pud_offset(pgd, vaddr);
32957+ pmd = pmd_offset(pud, vaddr);
32958+
32959+#ifdef CONFIG_X86_PAE
32960+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32961+#endif
32962+
32963 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32964 pmd++, pmd_idx++) {
32965 pte = page_table_kmap_check(one_page_table_init(pmd),
32966@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32967 }
32968 }
32969
32970-static inline int is_kernel_text(unsigned long addr)
32971+static inline int is_kernel_text(unsigned long start, unsigned long end)
32972 {
32973- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32974- return 1;
32975- return 0;
32976+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32977+ end <= ktla_ktva((unsigned long)_stext)) &&
32978+ (start >= ktla_ktva((unsigned long)_einittext) ||
32979+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32980+
32981+#ifdef CONFIG_ACPI_SLEEP
32982+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32983+#endif
32984+
32985+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32986+ return 0;
32987+ return 1;
32988 }
32989
32990 /*
32991@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32992 unsigned long last_map_addr = end;
32993 unsigned long start_pfn, end_pfn;
32994 pgd_t *pgd_base = swapper_pg_dir;
32995- int pgd_idx, pmd_idx, pte_ofs;
32996+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32997 unsigned long pfn;
32998 pgd_t *pgd;
32999+ pud_t *pud;
33000 pmd_t *pmd;
33001 pte_t *pte;
33002 unsigned pages_2m, pages_4k;
33003@@ -291,8 +295,13 @@ repeat:
33004 pfn = start_pfn;
33005 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33006 pgd = pgd_base + pgd_idx;
33007- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33008- pmd = one_md_table_init(pgd);
33009+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33010+ pud = pud_offset(pgd, 0);
33011+ pmd = pmd_offset(pud, 0);
33012+
33013+#ifdef CONFIG_X86_PAE
33014+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33015+#endif
33016
33017 if (pfn >= end_pfn)
33018 continue;
33019@@ -304,14 +313,13 @@ repeat:
33020 #endif
33021 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33022 pmd++, pmd_idx++) {
33023- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33024+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33025
33026 /*
33027 * Map with big pages if possible, otherwise
33028 * create normal page tables:
33029 */
33030 if (use_pse) {
33031- unsigned int addr2;
33032 pgprot_t prot = PAGE_KERNEL_LARGE;
33033 /*
33034 * first pass will use the same initial
33035@@ -322,11 +330,7 @@ repeat:
33036 _PAGE_PSE);
33037
33038 pfn &= PMD_MASK >> PAGE_SHIFT;
33039- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33040- PAGE_OFFSET + PAGE_SIZE-1;
33041-
33042- if (is_kernel_text(addr) ||
33043- is_kernel_text(addr2))
33044+ if (is_kernel_text(address, address + PMD_SIZE))
33045 prot = PAGE_KERNEL_LARGE_EXEC;
33046
33047 pages_2m++;
33048@@ -343,7 +347,7 @@ repeat:
33049 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33050 pte += pte_ofs;
33051 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33052- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33053+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33054 pgprot_t prot = PAGE_KERNEL;
33055 /*
33056 * first pass will use the same initial
33057@@ -351,7 +355,7 @@ repeat:
33058 */
33059 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33060
33061- if (is_kernel_text(addr))
33062+ if (is_kernel_text(address, address + PAGE_SIZE))
33063 prot = PAGE_KERNEL_EXEC;
33064
33065 pages_4k++;
33066@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33067
33068 pud = pud_offset(pgd, va);
33069 pmd = pmd_offset(pud, va);
33070- if (!pmd_present(*pmd))
33071+ if (!pmd_present(*pmd)) // PAX TODO: also check pmd_large(*pmd)
33072 break;
33073
33074 /* should not be large page here */
33075@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33076
33077 static void __init pagetable_init(void)
33078 {
33079- pgd_t *pgd_base = swapper_pg_dir;
33080-
33081- permanent_kmaps_init(pgd_base);
33082+ permanent_kmaps_init(swapper_pg_dir);
33083 }
33084
33085-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33086+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33087 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33088
33089 /* user-defined highmem size */
33090@@ -787,10 +789,10 @@ void __init mem_init(void)
33091 ((unsigned long)&__init_end -
33092 (unsigned long)&__init_begin) >> 10,
33093
33094- (unsigned long)&_etext, (unsigned long)&_edata,
33095- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33096+ (unsigned long)&_sdata, (unsigned long)&_edata,
33097+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33098
33099- (unsigned long)&_text, (unsigned long)&_etext,
33100+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33101 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33102
33103 /*
33104@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33105 if (!kernel_set_to_readonly)
33106 return;
33107
33108+ start = ktla_ktva(start);
33109 pr_debug("Set kernel text: %lx - %lx for read write\n",
33110 start, start+size);
33111
33112@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33113 if (!kernel_set_to_readonly)
33114 return;
33115
33116+ start = ktla_ktva(start);
33117 pr_debug("Set kernel text: %lx - %lx for read only\n",
33118 start, start+size);
33119
33120@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33121 unsigned long start = PFN_ALIGN(_text);
33122 unsigned long size = PFN_ALIGN(_etext) - start;
33123
33124+ start = ktla_ktva(start);
33125 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33126 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33127 size >> 10);
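
is_kernel_text() changes from a point test to a range test because its callers now classify whole 4 KiB or PMD-sized mappings, and a large page can straddle a section boundary. Each clause in the rewritten function is the same interval-overlap predicate, written in its negated De Morgan form; a minimal sketch:

#include <assert.h>

static int overlaps(unsigned long start, unsigned long end,
                    unsigned long a, unsigned long b)
{
	/* the negated form used in the patch:
	 * no overlap iff start >= b || end <= a */
	return !(start >= b || end <= a);
}

int main(void)
{
	assert(overlaps(0x1000, 0x3000, 0x2000, 0x4000));  /* partial overlap */
	assert(!overlaps(0x1000, 0x2000, 0x2000, 0x4000)); /* merely adjacent */
	assert(overlaps(0x0, 0x10000, 0x2000, 0x4000));    /* containment */
	return 0;
}
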
33128diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33129index 30eb05a..ae671ac 100644
33130--- a/arch/x86/mm/init_64.c
33131+++ b/arch/x86/mm/init_64.c
33132@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33133 * around without checking the pgd every time.
33134 */
33135
33136-pteval_t __supported_pte_mask __read_mostly = ~0;
33137+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33138 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33139
33140 int force_personality32;
33141@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33142
33143 for (address = start; address <= end; address += PGDIR_SIZE) {
33144 const pgd_t *pgd_ref = pgd_offset_k(address);
33145+
33146+#ifdef CONFIG_PAX_PER_CPU_PGD
33147+ unsigned long cpu;
33148+#else
33149 struct page *page;
33150+#endif
33151
33152 /*
33153 * When it is called after memory hot remove, pgd_none()
33154@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33155 continue;
33156
33157 spin_lock(&pgd_lock);
33158+
33159+#ifdef CONFIG_PAX_PER_CPU_PGD
33160+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33161+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33162+
33163+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33164+ BUG_ON(pgd_page_vaddr(*pgd)
33165+ != pgd_page_vaddr(*pgd_ref));
33166+
33167+ if (removed) {
33168+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33169+ pgd_clear(pgd);
33170+ } else {
33171+ if (pgd_none(*pgd))
33172+ set_pgd(pgd, *pgd_ref);
33173+ }
33174+
33175+ pgd = pgd_offset_cpu(cpu, kernel, address);
33176+#else
33177 list_for_each_entry(page, &pgd_list, lru) {
33178 pgd_t *pgd;
33179 spinlock_t *pgt_lock;
33180@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33181 /* the pgt_lock only for Xen */
33182 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33183 spin_lock(pgt_lock);
33184+#endif
33185
33186 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33187 BUG_ON(pgd_page_vaddr(*pgd)
33188@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33189 set_pgd(pgd, *pgd_ref);
33190 }
33191
33192+#ifndef CONFIG_PAX_PER_CPU_PGD
33193 spin_unlock(pgt_lock);
33194+#endif
33195+
33196 }
33197 spin_unlock(&pgd_lock);
33198 }
33199@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33200 {
33201 if (pgd_none(*pgd)) {
33202 pud_t *pud = (pud_t *)spp_getpage();
33203- pgd_populate(&init_mm, pgd, pud);
33204+ pgd_populate_kernel(&init_mm, pgd, pud);
33205 if (pud != pud_offset(pgd, 0))
33206 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33207 pud, pud_offset(pgd, 0));
33208@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33209 {
33210 if (pud_none(*pud)) {
33211 pmd_t *pmd = (pmd_t *) spp_getpage();
33212- pud_populate(&init_mm, pud, pmd);
33213+ pud_populate_kernel(&init_mm, pud, pmd);
33214 if (pmd != pmd_offset(pud, 0))
33215 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33216 pmd, pmd_offset(pud, 0));
33217@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33218 pmd = fill_pmd(pud, vaddr);
33219 pte = fill_pte(pmd, vaddr);
33220
33221+ pax_open_kernel();
33222 set_pte(pte, new_pte);
33223+ pax_close_kernel();
33224
33225 /*
33226 * It's enough to flush this one mapping.
33227@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33228 pgd = pgd_offset_k((unsigned long)__va(phys));
33229 if (pgd_none(*pgd)) {
33230 pud = (pud_t *) spp_getpage();
33231- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33232- _PAGE_USER));
33233+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33234 }
33235 pud = pud_offset(pgd, (unsigned long)__va(phys));
33236 if (pud_none(*pud)) {
33237 pmd = (pmd_t *) spp_getpage();
33238- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33239- _PAGE_USER));
33240+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33241 }
33242 pmd = pmd_offset(pud, phys);
33243 BUG_ON(!pmd_none(*pmd));
33244@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33245 prot);
33246
33247 spin_lock(&init_mm.page_table_lock);
33248- pud_populate(&init_mm, pud, pmd);
33249+ pud_populate_kernel(&init_mm, pud, pmd);
33250 spin_unlock(&init_mm.page_table_lock);
33251 }
33252 __flush_tlb_all();
33253@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33254 page_size_mask);
33255
33256 spin_lock(&init_mm.page_table_lock);
33257- pgd_populate(&init_mm, pgd, pud);
33258+ pgd_populate_kernel(&init_mm, pgd, pud);
33259 spin_unlock(&init_mm.page_table_lock);
33260 pgd_changed = true;
33261 }
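
Under PAX_PER_CPU_PGD, sync_global_pgds() replicates kernel-half PGD entries into every CPU's private user/kernel PGD pair instead of walking pgd_list. The walk advances in PGDIR_SIZE steps, one top-level slot per step; a sketch of that slot arithmetic for 4-level x86-64 paging (PGDIR_SHIFT = 39), with an illustrative address:

#include <stdio.h>

#define PGDIR_SHIFT  39
#define PTRS_PER_PGD 512UL
#define PGDIR_SIZE   (1UL << PGDIR_SHIFT)

static unsigned long pgd_index(unsigned long addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	unsigned long vmalloc_start = 0xffffc90000000000UL; /* illustrative */

	/* two addresses one PGDIR_SIZE apart land in adjacent PGD slots */
	printf("%lu %lu\n", pgd_index(vmalloc_start),
	       pgd_index(vmalloc_start + PGDIR_SIZE));
	return 0;
}
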
33262diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33263index 9ca35fc..4b2b7b7 100644
33264--- a/arch/x86/mm/iomap_32.c
33265+++ b/arch/x86/mm/iomap_32.c
33266@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33267 type = kmap_atomic_idx_push();
33268 idx = type + KM_TYPE_NR * smp_processor_id();
33269 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33270+
33271+ pax_open_kernel();
33272 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33273+ pax_close_kernel();
33274+
33275 arch_flush_lazy_mmu_mode();
33276
33277 return (void *)vaddr;
33278diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33279index fdf617c..b9e85bc 100644
33280--- a/arch/x86/mm/ioremap.c
33281+++ b/arch/x86/mm/ioremap.c
33282@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33283 unsigned long i;
33284
33285 for (i = 0; i < nr_pages; ++i)
33286- if (pfn_valid(start_pfn + i) &&
33287- !PageReserved(pfn_to_page(start_pfn + i)))
33288+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33289+ !PageReserved(pfn_to_page(start_pfn + i))))
33290 return 1;
33291
33292 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33293@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33294 *
33295 * Caller must ensure there is only one unmapping for the same pointer.
33296 */
33297-void iounmap(volatile void __iomem *addr)
33298+void iounmap(const volatile void __iomem *addr)
33299 {
33300 struct vm_struct *p, *o;
33301
33302@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33303 */
33304 void *xlate_dev_mem_ptr(phys_addr_t phys)
33305 {
33306- void *addr;
33307- unsigned long start = phys & PAGE_MASK;
33308-
33309 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33310- if (page_is_ram(start >> PAGE_SHIFT))
33311+ if (page_is_ram(phys >> PAGE_SHIFT))
33312+#ifdef CONFIG_HIGHMEM
33313+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33314+#endif
33315 return __va(phys);
33316
33317- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33318- if (addr)
33319- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33320-
33321- return addr;
33322+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33323 }
33324
33325 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33326 {
33327 if (page_is_ram(phys >> PAGE_SHIFT))
33328+#ifdef CONFIG_HIGHMEM
33329+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33330+#endif
33331 return;
33332
33333 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33334 return;
33335 }
33336
33337-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33338+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33339
33340 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33341 {
33342@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33343 early_ioremap_setup();
33344
33345 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33346- memset(bm_pte, 0, sizeof(bm_pte));
33347- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33348+ pmd_populate_user(&init_mm, pmd, bm_pte);
33349
33350 /*
33351 * The boot-ioremap range spans multiple pmds, for which
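
The rewritten xlate_dev_mem_ptr() drops the manual page/offset split because ioremap_cache() already maps the aligned page and returns a pointer with the sub-page offset added back. The split-and-recombine arithmetic the old code performed by hand:

#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long phys   = 0x000fe123UL;         /* illustrative address */
	unsigned long start  = phys & PAGE_MASK;     /* 0xfe000 */
	unsigned long offset = phys & ~PAGE_MASK;    /* 0x123 */

	assert(start + offset == phys);              /* recombination is lossless */
	assert((start | offset) == start + offset);  /* OR == add on aligned base */
	return 0;
}
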
33352diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33353index b4f2e7e..96c9c3e 100644
33354--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33355+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33356@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33357 * memory (e.g. tracked pages)? For now, we need this to avoid
33358 * invoking kmemcheck for PnP BIOS calls.
33359 */
33360- if (regs->flags & X86_VM_MASK)
33361+ if (v8086_mode(regs))
33362 return false;
33363- if (regs->cs != __KERNEL_CS)
33364+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33365 return false;
33366
33367 pte = kmemcheck_pte_lookup(address);
33368diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33369index df4552b..12c129c 100644
33370--- a/arch/x86/mm/mmap.c
33371+++ b/arch/x86/mm/mmap.c
33372@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33373 * Leave an at least ~128 MB hole with possible stack randomization.
33374 */
33375 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33376-#define MAX_GAP (TASK_SIZE/6*5)
33377+#define MAX_GAP (pax_task_size/6*5)
33378
33379 static int mmap_is_legacy(void)
33380 {
33381@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33382 return rnd << PAGE_SHIFT;
33383 }
33384
33385-static unsigned long mmap_base(void)
33386+static unsigned long mmap_base(struct mm_struct *mm)
33387 {
33388 unsigned long gap = rlimit(RLIMIT_STACK);
33389+ unsigned long pax_task_size = TASK_SIZE;
33390+
33391+#ifdef CONFIG_PAX_SEGMEXEC
33392+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33393+ pax_task_size = SEGMEXEC_TASK_SIZE;
33394+#endif
33395
33396 if (gap < MIN_GAP)
33397 gap = MIN_GAP;
33398 else if (gap > MAX_GAP)
33399 gap = MAX_GAP;
33400
33401- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33402+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33403 }
33404
33405 /*
33406 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33407 * does, but not when emulating X86_32
33408 */
33409-static unsigned long mmap_legacy_base(void)
33410+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33411 {
33412- if (mmap_is_ia32())
33413+ if (mmap_is_ia32()) {
33414+
33415+#ifdef CONFIG_PAX_SEGMEXEC
33416+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33417+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33418+ else
33419+#endif
33420+
33421 return TASK_UNMAPPED_BASE;
33422- else
33423+ } else
33424 return TASK_UNMAPPED_BASE + mmap_rnd();
33425 }
33426
33427@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33428 */
33429 void arch_pick_mmap_layout(struct mm_struct *mm)
33430 {
33431- mm->mmap_legacy_base = mmap_legacy_base();
33432- mm->mmap_base = mmap_base();
33433+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33434+ mm->mmap_base = mmap_base(mm);
33435+
33436+#ifdef CONFIG_PAX_RANDMMAP
33437+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33438+ mm->mmap_legacy_base += mm->delta_mmap;
33439+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33440+ }
33441+#endif
33442
33443 if (mmap_is_legacy()) {
33444 mm->mmap_base = mm->mmap_legacy_base;
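
After the change, mmap_base() clamps the stack gap between MIN_GAP and MAX_GAP against a pax_task_size that SEGMEXEC may halve, and arch_pick_mmap_layout() then shifts both bases by the RANDMMAP deltas. A user-space sketch of just the clamp-and-subtract core; constants are illustrative, and the SEGMEXEC and delta terms are omitted:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define TASK_SIZE (0xc0000000UL)   /* e.g. i386 3G/1G split */
#define MIN_GAP   (128UL << 20)    /* simplified: no stack-randomization term */
#define MAX_GAP   (TASK_SIZE / 6 * 5)

static unsigned long mmap_base(unsigned long stack_rlimit, unsigned long rnd)
{
	unsigned long gap = stack_rlimit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

int main(void)
{
	/* 8 MiB RLIMIT_STACK, made-up random offset */
	printf("%#lx\n", mmap_base(8UL << 20, 0x153000));
	return 0;
}
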
33445diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33446index 0057a7a..95c7edd 100644
33447--- a/arch/x86/mm/mmio-mod.c
33448+++ b/arch/x86/mm/mmio-mod.c
33449@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33450 break;
33451 default:
33452 {
33453- unsigned char *ip = (unsigned char *)instptr;
33454+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33455 my_trace->opcode = MMIO_UNKNOWN_OP;
33456 my_trace->width = 0;
33457 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33458@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33459 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33460 void __iomem *addr)
33461 {
33462- static atomic_t next_id;
33463+ static atomic_unchecked_t next_id;
33464 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33465 /* These are page-unaligned. */
33466 struct mmiotrace_map map = {
33467@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33468 .private = trace
33469 },
33470 .phys = offset,
33471- .id = atomic_inc_return(&next_id)
33472+ .id = atomic_inc_return_unchecked(&next_id)
33473 };
33474 map.map_id = trace->id;
33475
33476@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33477 ioremap_trace_core(offset, size, addr);
33478 }
33479
33480-static void iounmap_trace_core(volatile void __iomem *addr)
33481+static void iounmap_trace_core(const volatile void __iomem *addr)
33482 {
33483 struct mmiotrace_map map = {
33484 .phys = 0,
33485@@ -328,7 +328,7 @@ not_enabled:
33486 }
33487 }
33488
33489-void mmiotrace_iounmap(volatile void __iomem *addr)
33490+void mmiotrace_iounmap(const volatile void __iomem *addr)
33491 {
33492 might_sleep();
33493 if (is_enabled()) /* recheck and proper locking in *_core() */
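
next_id becomes atomic_unchecked_t because, under PAX_REFCOUNT, the ordinary atomic_inc_return() is instrumented to trap on overflow, which is desirable for reference counts but wrong for a free-running ID generator. A C11 sketch of such a counter, where wraparound is explicitly acceptable:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint next_id;   /* unsigned: overflow wraps, defined behavior */

static unsigned int new_id(void)
{
	/* fetch_add returns the old value; +1 mirrors atomic_inc_return() */
	return atomic_fetch_add_explicit(&next_id, 1, memory_order_relaxed) + 1;
}

int main(void)
{
	printf("%u %u %u\n", new_id(), new_id(), new_id());  /* 1 2 3 */
	return 0;
}
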
33494diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33495index cd4785b..25188b6 100644
33496--- a/arch/x86/mm/numa.c
33497+++ b/arch/x86/mm/numa.c
33498@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33499 }
33500 }
33501
33502-static int __init numa_register_memblks(struct numa_meminfo *mi)
33503+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33504 {
33505 unsigned long uninitialized_var(pfn_align);
33506 int i, nid;
33507diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33508index 536ea2f..f42c293 100644
33509--- a/arch/x86/mm/pageattr.c
33510+++ b/arch/x86/mm/pageattr.c
33511@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33512 */
33513 #ifdef CONFIG_PCI_BIOS
33514 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33515- pgprot_val(forbidden) |= _PAGE_NX;
33516+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33517 #endif
33518
33519 /*
33520@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33521 * Does not cover __inittext since that is gone later on. On
33522 * 64bit we do not enforce !NX on the low mapping
33523 */
33524- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33525- pgprot_val(forbidden) |= _PAGE_NX;
33526+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33527+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33528
33529+#ifdef CONFIG_DEBUG_RODATA
33530 /*
33531 * The .rodata section needs to be read-only. Using the pfn
33532 * catches all aliases.
33533@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33534 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33535 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33536 pgprot_val(forbidden) |= _PAGE_RW;
33537+#endif
33538
33539 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33540 /*
33541@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33542 }
33543 #endif
33544
33545+#ifdef CONFIG_PAX_KERNEXEC
33546+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33547+ pgprot_val(forbidden) |= _PAGE_RW;
33548+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33549+ }
33550+#endif
33551+
33552 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33553
33554 return prot;
33555@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33556 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33557 {
33558 /* change init_mm */
33559+ pax_open_kernel();
33560 set_pte_atomic(kpte, pte);
33561+
33562 #ifdef CONFIG_X86_32
33563 if (!SHARED_KERNEL_PMD) {
33564+
33565+#ifdef CONFIG_PAX_PER_CPU_PGD
33566+ unsigned long cpu;
33567+#else
33568 struct page *page;
33569+#endif
33570
33571+#ifdef CONFIG_PAX_PER_CPU_PGD
33572+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33573+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33574+#else
33575 list_for_each_entry(page, &pgd_list, lru) {
33576- pgd_t *pgd;
33577+ pgd_t *pgd = (pgd_t *)page_address(page);
33578+#endif
33579+
33580 pud_t *pud;
33581 pmd_t *pmd;
33582
33583- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33584+ pgd += pgd_index(address);
33585 pud = pud_offset(pgd, address);
33586 pmd = pmd_offset(pud, address);
33587 set_pte_atomic((pte_t *)pmd, pte);
33588 }
33589 }
33590 #endif
33591+ pax_close_kernel();
33592 }
33593
33594 static int
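
static_protections() works by accumulating protection bits that must not survive (here additionally _PAGE_RW and _PAGE_NX across the KERNEXEC kernel-text window) and stripping them from the requested pgprot in one final mask. The combining step in isolation, with the real x86 bit positions but stand-in logic:

#include <stdio.h>

#define _PAGE_RW (1ULL << 1)
#define _PAGE_NX (1ULL << 63)

int main(void)
{
	unsigned long long prot      = _PAGE_RW | 1; /* requested: present + RW */
	unsigned long long forbidden = 0;

	/* e.g. the address fell inside the kernel-text window */
	forbidden |= _PAGE_RW;   /* text must not be writable */
	forbidden |= _PAGE_NX;   /* ...and must stay executable */

	prot &= ~forbidden;      /* the single combining step */
	printf("%#llx\n", prot); /* RW stripped, NX never set */
	return 0;
}
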
33595diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33596index 7ac6869..c0ba541 100644
33597--- a/arch/x86/mm/pat.c
33598+++ b/arch/x86/mm/pat.c
33599@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33600 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33601
33602 if (pg_flags == _PGMT_DEFAULT)
33603- return -1;
33604+ return _PAGE_CACHE_MODE_NUM;
33605 else if (pg_flags == _PGMT_WC)
33606 return _PAGE_CACHE_MODE_WC;
33607 else if (pg_flags == _PGMT_UC_MINUS)
33608@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33609
33610 page = pfn_to_page(pfn);
33611 type = get_page_memtype(page);
33612- if (type != -1) {
33613+ if (type != _PAGE_CACHE_MODE_NUM) {
33614 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33615 start, end - 1, type, req_type);
33616 if (new_type)
33617@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33618
33619 if (!entry) {
33620 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33621- current->comm, current->pid, start, end - 1);
33622+ current->comm, task_pid_nr(current), start, end - 1);
33623 return -EINVAL;
33624 }
33625
33626@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33627 page = pfn_to_page(paddr >> PAGE_SHIFT);
33628 rettype = get_page_memtype(page);
33629 /*
33630- * -1 from get_page_memtype() implies RAM page is in its
33631+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33632 * default state and not reserved, and hence of type WB
33633 */
33634- if (rettype == -1)
33635+ if (rettype == _PAGE_CACHE_MODE_NUM)
33636 rettype = _PAGE_CACHE_MODE_WB;
33637
33638 return rettype;
33639@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33640
33641 while (cursor < to) {
33642 if (!devmem_is_allowed(pfn)) {
33643- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33644- current->comm, from, to - 1);
33645+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33646+ current->comm, from, to - 1, cursor);
33647 return 0;
33648 }
33649 cursor += PAGE_SIZE;
33650@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33651 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33652 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33653 "for [mem %#010Lx-%#010Lx]\n",
33654- current->comm, current->pid,
33655+ current->comm, task_pid_nr(current),
33656 cattr_name(pcm),
33657 base, (unsigned long long)(base + size-1));
33658 return -EINVAL;
33659@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33660 pcm = lookup_memtype(paddr);
33661 if (want_pcm != pcm) {
33662 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33663- current->comm, current->pid,
33664+ current->comm, task_pid_nr(current),
33665 cattr_name(want_pcm),
33666 (unsigned long long)paddr,
33667 (unsigned long long)(paddr + size - 1),
33668@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33669 free_memtype(paddr, paddr + size);
33670 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33671 " for [mem %#010Lx-%#010Lx], got %s\n",
33672- current->comm, current->pid,
33673+ current->comm, task_pid_nr(current),
33674 cattr_name(want_pcm),
33675 (unsigned long long)paddr,
33676 (unsigned long long)(paddr + size - 1),
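
get_page_memtype() reads one of four cache modes out of two bits of page->flags; the patch replaces the -1 sentinel with _PAGE_CACHE_MODE_NUM so the out-of-band value stays inside the enum page_cache_mode return type. A generic sketch of a two-bit tag in a flags word; the bit positions are stand-ins, not the kernel's PG_* bits:

#include <assert.h>

#define BIT_A (1UL << 8)
#define BIT_B (1UL << 9)
#define PGMT_MASK (BIT_A | BIT_B)

enum memtype { MT_DEFAULT, MT_WC, MT_UC_MINUS, MT_WB };

static unsigned long set_memtype(unsigned long flags, enum memtype mt)
{
	flags &= ~PGMT_MASK;
	if (mt == MT_WC)       flags |= BIT_A;
	if (mt == MT_UC_MINUS) flags |= BIT_B;
	if (mt == MT_WB)       flags |= BIT_A | BIT_B;
	return flags;
}

static enum memtype get_memtype(unsigned long flags)
{
	unsigned long v = flags & PGMT_MASK;
	if (v == BIT_A)     return MT_WC;
	if (v == BIT_B)     return MT_UC_MINUS;
	if (v == PGMT_MASK) return MT_WB;
	return MT_DEFAULT;  /* the case the patch renames away from -1 */
}

int main(void)
{
	unsigned long flags = 0xf0;   /* unrelated flag bits must survive */
	flags = set_memtype(flags, MT_UC_MINUS);
	assert(get_memtype(flags) == MT_UC_MINUS);
	assert((flags & 0xf0) == 0xf0);
	return 0;
}
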
33677diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33678index 6582adc..fcc5d0b 100644
33679--- a/arch/x86/mm/pat_rbtree.c
33680+++ b/arch/x86/mm/pat_rbtree.c
33681@@ -161,7 +161,7 @@ success:
33682
33683 failure:
33684 printk(KERN_INFO "%s:%d conflicting memory types "
33685- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33686+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33687 end, cattr_name(found_type), cattr_name(match->type));
33688 return -EBUSY;
33689 }
33690diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33691index 9f0614d..92ae64a 100644
33692--- a/arch/x86/mm/pf_in.c
33693+++ b/arch/x86/mm/pf_in.c
33694@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33695 int i;
33696 enum reason_type rv = OTHERS;
33697
33698- p = (unsigned char *)ins_addr;
33699+ p = (unsigned char *)ktla_ktva(ins_addr);
33700 p += skip_prefix(p, &prf);
33701 p += get_opcode(p, &opcode);
33702
33703@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33704 struct prefix_bits prf;
33705 int i;
33706
33707- p = (unsigned char *)ins_addr;
33708+ p = (unsigned char *)ktla_ktva(ins_addr);
33709 p += skip_prefix(p, &prf);
33710 p += get_opcode(p, &opcode);
33711
33712@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33713 struct prefix_bits prf;
33714 int i;
33715
33716- p = (unsigned char *)ins_addr;
33717+ p = (unsigned char *)ktla_ktva(ins_addr);
33718 p += skip_prefix(p, &prf);
33719 p += get_opcode(p, &opcode);
33720
33721@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33722 struct prefix_bits prf;
33723 int i;
33724
33725- p = (unsigned char *)ins_addr;
33726+ p = (unsigned char *)ktla_ktva(ins_addr);
33727 p += skip_prefix(p, &prf);
33728 p += get_opcode(p, &opcode);
33729 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33730@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33731 struct prefix_bits prf;
33732 int i;
33733
33734- p = (unsigned char *)ins_addr;
33735+ p = (unsigned char *)ktla_ktva(ins_addr);
33736 p += skip_prefix(p, &prf);
33737 p += get_opcode(p, &opcode);
33738 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
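
The mmiotrace decoder above reads instruction bytes at a fault address; under PaX KERNEXEC on i386 the kernel code segment is rebased, so a text address must first be converted with `ktla_ktva()` ("kernel text logical address to kernel text virtual address") before a data pointer may dereference it. A simplified sketch of the macro shape only; `demo_ktla_ktva` and `DEMO_TEXT_SHIFT` are our placeholders, the real definition lives in the PaX headers and derives the shift from the kernel's load address:

/* Placeholder for the real segment shift; 0 here only so the sketch
 * is self-contained. */
#define DEMO_TEXT_SHIFT 0UL

#ifdef CONFIG_PAX_KERNEXEC
#define demo_ktla_ktva(addr)	((addr) + DEMO_TEXT_SHIFT)	/* rebase text address */
#else
#define demo_ktla_ktva(addr)	(addr)				/* identity otherwise */
#endif
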
33739diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33740index 7b22ada..b11e66f 100644
33741--- a/arch/x86/mm/pgtable.c
33742+++ b/arch/x86/mm/pgtable.c
33743@@ -97,10 +97,75 @@ static inline void pgd_list_del(pgd_t *pgd)
33744 list_del(&page->lru);
33745 }
33746
33747-#define UNSHARED_PTRS_PER_PGD \
33748- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33749+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33750+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33751
33752+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33753+{
33754+ unsigned int count = USER_PGD_PTRS;
33755
33756+ if (!pax_user_shadow_base)
33757+ return;
33758+
33759+ while (count--)
33760+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33761+}
33762+#endif
33763+
33764+#ifdef CONFIG_PAX_PER_CPU_PGD
33765+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33766+{
33767+ unsigned int count = USER_PGD_PTRS;
33768+
33769+ while (count--) {
33770+ pgd_t pgd;
33771+
33772+#ifdef CONFIG_X86_64
33773+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33774+#else
33775+ pgd = *src++;
33776+#endif
33777+
33778+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33779+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33780+#endif
33781+
33782+ *dst++ = pgd;
33783+ }
33784+
33785+}
33786+#endif
33787+
33788+#ifdef CONFIG_X86_64
33789+#define pxd_t pud_t
33790+#define pyd_t pgd_t
33791+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33792+#define pgtable_pxd_page_ctor(page) true
33793+#define pgtable_pxd_page_dtor(page) do {} while (0)
33794+#define pxd_free(mm, pud) pud_free((mm), (pud))
33795+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33796+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33797+#define PYD_SIZE PGDIR_SIZE
33798+#define mm_inc_nr_pxds(mm) do {} while (0)
33799+#define mm_dec_nr_pxds(mm) do {} while (0)
33800+#else
33801+#define pxd_t pmd_t
33802+#define pyd_t pud_t
33803+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33804+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33805+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33806+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33807+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33808+#define pyd_offset(mm, address) pud_offset((mm), (address))
33809+#define PYD_SIZE PUD_SIZE
33810+#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm)
33811+#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm)
33812+#endif
33813+
33814+#ifdef CONFIG_PAX_PER_CPU_PGD
33815+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33816+static inline void pgd_dtor(pgd_t *pgd) {}
33817+#else
33818 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33819 {
33820 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33821@@ -141,6 +206,7 @@ static void pgd_dtor(pgd_t *pgd)
33822 pgd_list_del(pgd);
33823 spin_unlock(&pgd_lock);
33824 }
33825+#endif
33826
33827 /*
33828 * List of all pgd's needed for non-PAE so it can invalidate entries
33829@@ -153,7 +219,7 @@ static void pgd_dtor(pgd_t *pgd)
33830 * -- nyc
33831 */
33832
33833-#ifdef CONFIG_X86_PAE
33834+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33835 /*
33836 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33837 * updating the top-level pagetable entries to guarantee the
33838@@ -165,7 +231,7 @@ static void pgd_dtor(pgd_t *pgd)
33839 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33840 * and initialize the kernel pmds here.
33841 */
33842-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33843+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33844
33845 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33846 {
33847@@ -183,46 +249,48 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33848 */
33849 flush_tlb_mm(mm);
33850 }
33851+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33852+#define PREALLOCATED_PXDS USER_PGD_PTRS
33853 #else /* !CONFIG_X86_PAE */
33854
33855 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33856-#define PREALLOCATED_PMDS 0
33857+#define PREALLOCATED_PXDS 0
33858
33859 #endif /* CONFIG_X86_PAE */
33860
33861-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
33862+static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
33863 {
33864 int i;
33865
33866- for(i = 0; i < PREALLOCATED_PMDS; i++)
33867- if (pmds[i]) {
33868- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33869- free_page((unsigned long)pmds[i]);
33870- mm_dec_nr_pmds(mm);
33871+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33872+ if (pxds[i]) {
33873+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33874+ free_page((unsigned long)pxds[i]);
33875+ mm_dec_nr_pxds(mm);
33876 }
33877 }
33878
33879-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33880+static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
33881 {
33882 int i;
33883 bool failed = false;
33884
33885- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33886- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33887- if (!pmd)
33888+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33889+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33890+ if (!pxd)
33891 failed = true;
33892- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33893- free_page((unsigned long)pmd);
33894- pmd = NULL;
33895+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33896+ free_page((unsigned long)pxd);
33897+ pxd = NULL;
33898 failed = true;
33899 }
33900- if (pmd)
33901- mm_inc_nr_pmds(mm);
33902- pmds[i] = pmd;
33903+ if (pxd)
33904+ mm_inc_nr_pxds(mm);
33905+ pxds[i] = pxd;
33906 }
33907
33908 if (failed) {
33909- free_pmds(mm, pmds);
33910+ free_pxds(mm, pxds);
33911 return -ENOMEM;
33912 }
33913
33914@@ -235,50 +303,54 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33915 * preallocate which never got a corresponding vma will need to be
33916 * freed manually.
33917 */
33918-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33919+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33920 {
33921 int i;
33922
33923- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33924+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33925 pgd_t pgd = pgdp[i];
33926
33927 if (pgd_val(pgd) != 0) {
33928- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33929+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33930
33931- pgdp[i] = native_make_pgd(0);
33932+ set_pgd(pgdp + i, native_make_pgd(0));
33933
33934- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33935- pmd_free(mm, pmd);
33936- mm_dec_nr_pmds(mm);
33937+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33938+ pxd_free(mm, pxd);
33939+ mm_dec_nr_pxds(mm);
33940 }
33941 }
33942 }
33943
33944-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33945+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33946 {
33947- pud_t *pud;
33948+ pyd_t *pyd;
33949 int i;
33950
33951- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33952+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33953 return;
33954
33955- pud = pud_offset(pgd, 0);
33956+#ifdef CONFIG_X86_64
33957+ pyd = pyd_offset(mm, 0L);
33958+#else
33959+ pyd = pyd_offset(pgd, 0L);
33960+#endif
33961
33962- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33963- pmd_t *pmd = pmds[i];
33964+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33965+ pxd_t *pxd = pxds[i];
33966
33967 if (i >= KERNEL_PGD_BOUNDARY)
33968- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33969- sizeof(pmd_t) * PTRS_PER_PMD);
33970+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33971+ sizeof(pxd_t) * PTRS_PER_PMD);
33972
33973- pud_populate(mm, pud, pmd);
33974+ pyd_populate(mm, pyd, pxd);
33975 }
33976 }
33977
33978 pgd_t *pgd_alloc(struct mm_struct *mm)
33979 {
33980 pgd_t *pgd;
33981- pmd_t *pmds[PREALLOCATED_PMDS];
33982+ pxd_t *pxds[PREALLOCATED_PXDS];
33983
33984 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33985
33986@@ -287,11 +359,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33987
33988 mm->pgd = pgd;
33989
33990- if (preallocate_pmds(mm, pmds) != 0)
33991+ if (preallocate_pxds(mm, pxds) != 0)
33992 goto out_free_pgd;
33993
33994 if (paravirt_pgd_alloc(mm) != 0)
33995- goto out_free_pmds;
33996+ goto out_free_pxds;
33997
33998 /*
33999 * Make sure that pre-populating the pmds is atomic with
34000@@ -301,14 +373,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34001 spin_lock(&pgd_lock);
34002
34003 pgd_ctor(mm, pgd);
34004- pgd_prepopulate_pmd(mm, pgd, pmds);
34005+ pgd_prepopulate_pxd(mm, pgd, pxds);
34006
34007 spin_unlock(&pgd_lock);
34008
34009 return pgd;
34010
34011-out_free_pmds:
34012- free_pmds(mm, pmds);
34013+out_free_pxds:
34014+ free_pxds(mm, pxds);
34015 out_free_pgd:
34016 free_page((unsigned long)pgd);
34017 out:
34018@@ -317,7 +389,7 @@ out:
34019
34020 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34021 {
34022- pgd_mop_up_pmds(mm, pgd);
34023+ pgd_mop_up_pxds(mm, pgd);
34024 pgd_dtor(pgd);
34025 paravirt_pgd_free(mm, pgd);
34026 free_page((unsigned long)pgd);
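
The pgtable.c rework above folds two page-table levels into one code path: `pxd_t` aliases `pud_t` on x86-64 (where per-CPU PGDs preallocate `USER_PGD_PTRS` PUD pages) and `pmd_t` on 32-bit PAE, with matching helper aliases, so `preallocate_pxds()`, `free_pxds()` and `pgd_mop_up_pxds()` are written once. A reduced sketch of the aliasing trick; the `demo_*` names are illustrative:

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Alias the "one level below the directory" type and its free helper,
 * then write the teardown loop once against the aliases. */
#ifdef CONFIG_X86_64
#define demo_pxd_t		pud_t
#define demo_pxd_free(mm, p)	pud_free((mm), (p))
#else
#define demo_pxd_t		pmd_t
#define demo_pxd_free(mm, p)	pmd_free((mm), (p))
#endif

static void demo_drop(struct mm_struct *mm, demo_pxd_t *tbl[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i])
			demo_pxd_free(mm, tbl[i]);
}
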
34027diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34028index 75cc097..79a097f 100644
34029--- a/arch/x86/mm/pgtable_32.c
34030+++ b/arch/x86/mm/pgtable_32.c
34031@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34032 return;
34033 }
34034 pte = pte_offset_kernel(pmd, vaddr);
34035+
34036+ pax_open_kernel();
34037 if (pte_val(pteval))
34038 set_pte_at(&init_mm, vaddr, pte, pteval);
34039 else
34040 pte_clear(&init_mm, vaddr, pte);
34041+ pax_close_kernel();
34042
34043 /*
34044 * It's enough to flush this one mapping.
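
`set_pte_vaddr()` now writes the live kernel page table inside a `pax_open_kernel()`/`pax_close_kernel()` bracket, the PaX idiom for the few legitimate writes to memory that KERNEXEC keeps read-only. On x86 the bracket is commonly built around toggling CR0.WP; a simplified sketch of that idea (not the patch's exact per-CPU implementation; the `demo_*` names are ours):

#include <linux/preempt.h>
#include <asm/special_insns.h>
#include <asm/processor-flags.h>

/* Clear CR0.WP so supervisor-mode stores ignore read-only PTEs, do the
 * protected write, then restore WP.  Preemption stays off so the CR0
 * change cannot leak to another task. */
static unsigned long demo_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);
	return cr0;
}

static void demo_close_kernel(unsigned long cr0)
{
	write_cr0(cr0);
	preempt_enable();
}
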
34045diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34046index e666cbb..61788c45 100644
34047--- a/arch/x86/mm/physaddr.c
34048+++ b/arch/x86/mm/physaddr.c
34049@@ -10,7 +10,7 @@
34050 #ifdef CONFIG_X86_64
34051
34052 #ifdef CONFIG_DEBUG_VIRTUAL
34053-unsigned long __phys_addr(unsigned long x)
34054+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34055 {
34056 unsigned long y = x - __START_KERNEL_map;
34057
34058@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34059 #else
34060
34061 #ifdef CONFIG_DEBUG_VIRTUAL
34062-unsigned long __phys_addr(unsigned long x)
34063+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34064 {
34065 unsigned long phys_addr = x - PAGE_OFFSET;
34066 /* VMALLOC_* aren't constants */
34067diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34068index 90555bf..f5f1828 100644
34069--- a/arch/x86/mm/setup_nx.c
34070+++ b/arch/x86/mm/setup_nx.c
34071@@ -5,8 +5,10 @@
34072 #include <asm/pgtable.h>
34073 #include <asm/proto.h>
34074
34075+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34076 static int disable_nx;
34077
34078+#ifndef CONFIG_PAX_PAGEEXEC
34079 /*
34080 * noexec = on|off
34081 *
34082@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34083 return 0;
34084 }
34085 early_param("noexec", noexec_setup);
34086+#endif
34087+
34088+#endif
34089
34090 void x86_configure_nx(void)
34091 {
34092+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34093 if (cpu_has_nx && !disable_nx)
34094 __supported_pte_mask |= _PAGE_NX;
34095 else
34096+#endif
34097 __supported_pte_mask &= ~_PAGE_NX;
34098 }
34099
34100diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34101index 3250f23..7a97ba2 100644
34102--- a/arch/x86/mm/tlb.c
34103+++ b/arch/x86/mm/tlb.c
34104@@ -45,7 +45,11 @@ void leave_mm(int cpu)
34105 BUG();
34106 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34107 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34108+
34109+#ifndef CONFIG_PAX_PER_CPU_PGD
34110 load_cr3(swapper_pg_dir);
34111+#endif
34112+
34113 /*
34114 * This gets called in the idle path where RCU
34115 * functions differently. Tracing normally
34116diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34117new file mode 100644
34118index 0000000..dace51c
34119--- /dev/null
34120+++ b/arch/x86/mm/uderef_64.c
34121@@ -0,0 +1,37 @@
34122+#include <linux/mm.h>
34123+#include <asm/pgtable.h>
34124+#include <asm/uaccess.h>
34125+
34126+#ifdef CONFIG_PAX_MEMORY_UDEREF
34127+/* PaX: due to the special call convention these functions must
34128+ * - remain leaf functions under all configurations,
34129+ * - never be called directly, only dereferenced from the wrappers.
34130+ */
34131+void __pax_open_userland(void)
34132+{
34133+ unsigned int cpu;
34134+
34135+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34136+ return;
34137+
34138+ cpu = raw_get_cpu();
34139+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34140+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34141+ raw_put_cpu_no_resched();
34142+}
34143+EXPORT_SYMBOL(__pax_open_userland);
34144+
34145+void __pax_close_userland(void)
34146+{
34147+ unsigned int cpu;
34148+
34149+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34150+ return;
34151+
34152+ cpu = raw_get_cpu();
34153+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34154+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34155+ raw_put_cpu_no_resched();
34156+}
34157+EXPORT_SYMBOL(__pax_close_userland);
34158+#endif
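
The new uderef_64.c implements the UDEREF world switch: each CPU owns a kernel PGD and a shadowed user PGD, and `__pax_open_userland()`/`__pax_close_userland()` move between them by writing CR3 with a fixed PCID (`PCID_KERNEL`/`PCID_USER`) plus `PCID_NOFLUSH`, i.e. CR3 bit 63, so neither context's TLB entries are flushed on the switch. A sketch of the CR3 value being composed, assuming the conventional bit layout; the `DEMO_*` constants mirror the usage above but are our names:

#include <linux/types.h>

#define DEMO_PCID_KERNEL	0ULL		/* PCID 0: kernel context   */
#define DEMO_PCID_USER		1ULL		/* PCID 1: userland context */
#define DEMO_CR3_NOFLUSH	(1ULL << 63)	/* keep old TLB entries     */

/* Page-aligned PGD physical address in the middle, target PCID in the
 * low 12 bits, no-flush request in bit 63 -- the shape of the value
 * write_cr3() receives in the functions above. */
static inline u64 demo_make_cr3(u64 pgd_pa, u64 pcid)
{
	return (pgd_pa & ~0xfffULL) | pcid | DEMO_CR3_NOFLUSH;
}
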
34159diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34160index 6440221..f84b5c7 100644
34161--- a/arch/x86/net/bpf_jit.S
34162+++ b/arch/x86/net/bpf_jit.S
34163@@ -9,6 +9,7 @@
34164 */
34165 #include <linux/linkage.h>
34166 #include <asm/dwarf2.h>
34167+#include <asm/alternative-asm.h>
34168
34169 /*
34170 * Calling convention :
34171@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34172 jle bpf_slow_path_word
34173 mov (SKBDATA,%rsi),%eax
34174 bswap %eax /* ntohl() */
34175+ pax_force_retaddr
34176 ret
34177
34178 sk_load_half:
34179@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34180 jle bpf_slow_path_half
34181 movzwl (SKBDATA,%rsi),%eax
34182 rol $8,%ax # ntohs()
34183+ pax_force_retaddr
34184 ret
34185
34186 sk_load_byte:
34187@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34188 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34189 jle bpf_slow_path_byte
34190 movzbl (SKBDATA,%rsi),%eax
34191+ pax_force_retaddr
34192 ret
34193
34194 /* rsi contains offset and can be scratched */
34195@@ -90,6 +94,7 @@ bpf_slow_path_word:
34196 js bpf_error
34197 mov - MAX_BPF_STACK + 32(%rbp),%eax
34198 bswap %eax
34199+ pax_force_retaddr
34200 ret
34201
34202 bpf_slow_path_half:
34203@@ -98,12 +103,14 @@ bpf_slow_path_half:
34204 mov - MAX_BPF_STACK + 32(%rbp),%ax
34205 rol $8,%ax
34206 movzwl %ax,%eax
34207+ pax_force_retaddr
34208 ret
34209
34210 bpf_slow_path_byte:
34211 bpf_slow_path_common(1)
34212 js bpf_error
34213 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34214+ pax_force_retaddr
34215 ret
34216
34217 #define sk_negative_common(SIZE) \
34218@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34219 sk_negative_common(4)
34220 mov (%rax), %eax
34221 bswap %eax
34222+ pax_force_retaddr
34223 ret
34224
34225 bpf_slow_path_half_neg:
34226@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34227 mov (%rax),%ax
34228 rol $8,%ax
34229 movzwl %ax,%eax
34230+ pax_force_retaddr
34231 ret
34232
34233 bpf_slow_path_byte_neg:
34234@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34235 .globl sk_load_byte_negative_offset
34236 sk_negative_common(1)
34237 movzbl (%rax), %eax
34238+ pax_force_retaddr
34239 ret
34240
34241 bpf_error:
34242@@ -156,4 +166,5 @@ bpf_error:
34243 mov - MAX_BPF_STACK + 16(%rbp),%r14
34244 mov - MAX_BPF_STACK + 24(%rbp),%r15
34245 leaveq
34246+ pax_force_retaddr
34247 ret
34248diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34249index 9875143..36776ae 100644
34250--- a/arch/x86/net/bpf_jit_comp.c
34251+++ b/arch/x86/net/bpf_jit_comp.c
34252@@ -13,7 +13,11 @@
34253 #include <linux/if_vlan.h>
34254 #include <asm/cacheflush.h>
34255
34256+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34257+int bpf_jit_enable __read_only;
34258+#else
34259 int bpf_jit_enable __read_mostly;
34260+#endif
34261
34262 /*
34263 * assembly code in arch/x86/net/bpf_jit.S
34264@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34265 static void jit_fill_hole(void *area, unsigned int size)
34266 {
34267 /* fill whole space with int3 instructions */
34268+ pax_open_kernel();
34269 memset(area, 0xcc, size);
34270+ pax_close_kernel();
34271 }
34272
34273 struct jit_context {
34274@@ -559,6 +565,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
34275 if (is_ereg(dst_reg))
34276 EMIT1(0x41);
34277 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
34278+
34279+ /* emit 'movzwl eax, ax' */
34280+ if (is_ereg(dst_reg))
34281+ EMIT3(0x45, 0x0F, 0xB7);
34282+ else
34283+ EMIT2(0x0F, 0xB7);
34284+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
34285 break;
34286 case 32:
34287 /* emit 'bswap eax' to swap lower 4 bytes */
34288@@ -577,6 +590,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
34289 break;
34290
34291 case BPF_ALU | BPF_END | BPF_FROM_LE:
34292+ switch (imm32) {
34293+ case 16:
34294+ /* emit 'movzwl eax, ax' to zero extend 16-bit
34295+ * into 64 bit
34296+ */
34297+ if (is_ereg(dst_reg))
34298+ EMIT3(0x45, 0x0F, 0xB7);
34299+ else
34300+ EMIT2(0x0F, 0xB7);
34301+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
34302+ break;
34303+ case 32:
34304+ /* emit 'mov eax, eax' to clear upper 32-bits */
34305+ if (is_ereg(dst_reg))
34306+ EMIT1(0x45);
34307+ EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
34308+ break;
34309+ case 64:
34310+ /* nop */
34311+ break;
34312+ }
34313 break;
34314
34315 /* ST: *(u8*)(dst_reg + off) = imm */
34316@@ -896,7 +930,9 @@ common_load:
34317 pr_err("bpf_jit_compile fatal error\n");
34318 return -EFAULT;
34319 }
34320+ pax_open_kernel();
34321 memcpy(image + proglen, temp, ilen);
34322+ pax_close_kernel();
34323 }
34324 proglen += ilen;
34325 addrs[i] = proglen;
34326@@ -968,7 +1004,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34327
34328 if (image) {
34329 bpf_flush_icache(header, image + proglen);
34330- set_memory_ro((unsigned long)header, header->pages);
34331 prog->bpf_func = (void *)image;
34332 prog->jited = true;
34333 }
34334@@ -981,12 +1016,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34335 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34336 struct bpf_binary_header *header = (void *)addr;
34337
34338- if (!fp->jited)
34339- goto free_filter;
34340+ if (fp->jited)
34341+ bpf_jit_binary_free(header);
34342
34343- set_memory_rw(addr, header->pages);
34344- bpf_jit_binary_free(header);
34345-
34346-free_filter:
34347 bpf_prog_unlock_free(fp);
34348 }
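
Two independent things happen in the bpf_jit_comp.c hunks: the JIT image writes (`jit_fill_hole()`'s `int3` padding and the final `memcpy()`) are bracketed with `pax_open_kernel()` because the image is now kept read-only throughout (hence the dropped `set_memory_ro()`/`set_memory_rw()` calls), and the `BPF_FROM_LE`/16-bit `BPF_FROM_BE` conversions gain explicit zero-extension, since a 16- or 32-bit result must read back zero-extended through the 64-bit BPF register. The added `movzwl` guarantees exactly what this runnable userspace snippet shows:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t reg = 0xdeadbeefcafe1234ULL;

	/* After a 16-bit endian op the upper 48 bits must read as zero;
	 * truncating through uint16_t models the emitted movzwl. */
	reg = (uint16_t)reg;
	printf("%#llx\n", (unsigned long long)reg);	/* prints 0x1234 */
	return 0;
}
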
34349diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34350index 5d04be5..2beeaa2 100644
34351--- a/arch/x86/oprofile/backtrace.c
34352+++ b/arch/x86/oprofile/backtrace.c
34353@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34354 struct stack_frame_ia32 *fp;
34355 unsigned long bytes;
34356
34357- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34358+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34359 if (bytes != 0)
34360 return NULL;
34361
34362- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34363+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34364
34365 oprofile_add_trace(bufhead[0].return_address);
34366
34367@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34368 struct stack_frame bufhead[2];
34369 unsigned long bytes;
34370
34371- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34372+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34373 if (bytes != 0)
34374 return NULL;
34375
34376@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34377 {
34378 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34379
34380- if (!user_mode_vm(regs)) {
34381+ if (!user_mode(regs)) {
34382 unsigned long stack = kernel_stack_pointer(regs);
34383 if (depth)
34384 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34385diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34386index 1d2e639..f6ef82a 100644
34387--- a/arch/x86/oprofile/nmi_int.c
34388+++ b/arch/x86/oprofile/nmi_int.c
34389@@ -23,6 +23,7 @@
34390 #include <asm/nmi.h>
34391 #include <asm/msr.h>
34392 #include <asm/apic.h>
34393+#include <asm/pgtable.h>
34394
34395 #include "op_counter.h"
34396 #include "op_x86_model.h"
34397@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34398 if (ret)
34399 return ret;
34400
34401- if (!model->num_virt_counters)
34402- model->num_virt_counters = model->num_counters;
34403+ if (!model->num_virt_counters) {
34404+ pax_open_kernel();
34405+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34406+ pax_close_kernel();
34407+ }
34408
34409 mux_init(ops);
34410
34411diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34412index 50d86c0..7985318 100644
34413--- a/arch/x86/oprofile/op_model_amd.c
34414+++ b/arch/x86/oprofile/op_model_amd.c
34415@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34416 num_counters = AMD64_NUM_COUNTERS;
34417 }
34418
34419- op_amd_spec.num_counters = num_counters;
34420- op_amd_spec.num_controls = num_counters;
34421- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34422+ pax_open_kernel();
34423+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34424+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34425+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34426+ pax_close_kernel();
34427
34428 return 0;
34429 }
34430diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34431index d90528e..0127e2b 100644
34432--- a/arch/x86/oprofile/op_model_ppro.c
34433+++ b/arch/x86/oprofile/op_model_ppro.c
34434@@ -19,6 +19,7 @@
34435 #include <asm/msr.h>
34436 #include <asm/apic.h>
34437 #include <asm/nmi.h>
34438+#include <asm/pgtable.h>
34439
34440 #include "op_x86_model.h"
34441 #include "op_counter.h"
34442@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34443
34444 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34445
34446- op_arch_perfmon_spec.num_counters = num_counters;
34447- op_arch_perfmon_spec.num_controls = num_counters;
34448+ pax_open_kernel();
34449+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34450+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34451+ pax_close_kernel();
34452 }
34453
34454 static int arch_perfmon_init(struct oprofile_operations *ignore)
34455diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34456index 71e8a67..6a313bb 100644
34457--- a/arch/x86/oprofile/op_x86_model.h
34458+++ b/arch/x86/oprofile/op_x86_model.h
34459@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34460 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34461 struct op_msrs const * const msrs);
34462 #endif
34463-};
34464+} __do_const;
34465
34466 struct op_counter_config;
34467
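
Marking `struct op_x86_model_spec` with `__do_const` hands it to the grsecurity constify GCC plugin, which forces such ops structures into read-only memory; the counter fields that must still be set at probe time are then written through a const-discarding cast inside a `pax_open_kernel()` window, exactly as the three oprofile hunks above do. A reduced sketch of the pattern, assuming the plugin's `do_const` attribute (merely an unknown-attribute warning without the plugin); `demo_ops` is illustrative:

/* do_const forces the struct read-only regardless of its data members,
 * so the rare post-init update casts constness away while the
 * open/close-kernel bracket makes the page writable. */
struct demo_ops {
	int (*setup)(void);
	unsigned int num_counters;
} __attribute__((do_const));

static struct demo_ops demo;

static void demo_set_counters(unsigned int n)
{
	/* pax_open_kernel(); */
	*(unsigned int *)&demo.num_counters = n;
	/* pax_close_kernel(); */
}
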
34468diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34469index 852aa4c..71613f2 100644
34470--- a/arch/x86/pci/intel_mid_pci.c
34471+++ b/arch/x86/pci/intel_mid_pci.c
34472@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34473 pci_mmcfg_late_init();
34474 pcibios_enable_irq = intel_mid_pci_irq_enable;
34475 pcibios_disable_irq = intel_mid_pci_irq_disable;
34476- pci_root_ops = intel_mid_pci_ops;
34477+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34478 pci_soc_mode = 1;
34479 /* Continue with standard init */
34480 return 1;
34481diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34482index 5dc6ca5..25c03f5 100644
34483--- a/arch/x86/pci/irq.c
34484+++ b/arch/x86/pci/irq.c
34485@@ -51,7 +51,7 @@ struct irq_router {
34486 struct irq_router_handler {
34487 u16 vendor;
34488 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34489-};
34490+} __do_const;
34491
34492 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34493 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34494@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34495 return 0;
34496 }
34497
34498-static __initdata struct irq_router_handler pirq_routers[] = {
34499+static __initconst const struct irq_router_handler pirq_routers[] = {
34500 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34501 { PCI_VENDOR_ID_AL, ali_router_probe },
34502 { PCI_VENDOR_ID_ITE, ite_router_probe },
34503@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34504 static void __init pirq_find_router(struct irq_router *r)
34505 {
34506 struct irq_routing_table *rt = pirq_table;
34507- struct irq_router_handler *h;
34508+ const struct irq_router_handler *h;
34509
34510 #ifdef CONFIG_PCI_BIOS
34511 if (!rt->signature) {
34512@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34513 return 0;
34514 }
34515
34516-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34517+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34518 {
34519 .callback = fix_broken_hp_bios_irq9,
34520 .ident = "HP Pavilion N5400 Series Laptop",
34521diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34522index 9b83b90..4112152 100644
34523--- a/arch/x86/pci/pcbios.c
34524+++ b/arch/x86/pci/pcbios.c
34525@@ -79,7 +79,7 @@ union bios32 {
34526 static struct {
34527 unsigned long address;
34528 unsigned short segment;
34529-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34530+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34531
34532 /*
34533 * Returns the entry point for the given service, NULL on error
34534@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34535 unsigned long length; /* %ecx */
34536 unsigned long entry; /* %edx */
34537 unsigned long flags;
34538+ struct desc_struct d, *gdt;
34539
34540 local_irq_save(flags);
34541- __asm__("lcall *(%%edi); cld"
34542+
34543+ gdt = get_cpu_gdt_table(smp_processor_id());
34544+
34545+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34546+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34547+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34548+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34549+
34550+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34551 : "=a" (return_code),
34552 "=b" (address),
34553 "=c" (length),
34554 "=d" (entry)
34555 : "0" (service),
34556 "1" (0),
34557- "D" (&bios32_indirect));
34558+ "D" (&bios32_indirect),
34559+ "r"(__PCIBIOS_DS)
34560+ : "memory");
34561+
34562+ pax_open_kernel();
34563+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34564+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34565+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34566+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34567+ pax_close_kernel();
34568+
34569 local_irq_restore(flags);
34570
34571 switch (return_code) {
34572- case 0:
34573- return address + entry;
34574- case 0x80: /* Not present */
34575- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34576- return 0;
34577- default: /* Shouldn't happen */
34578- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34579- service, return_code);
34580+ case 0: {
34581+ int cpu;
34582+ unsigned char flags;
34583+
34584+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34585+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34586+ printk(KERN_WARNING "bios32_service: not valid\n");
34587 return 0;
34588+ }
34589+ address = address + PAGE_OFFSET;
34590+ length += 16UL; /* some BIOSs underreport this... */
34591+ flags = 4;
34592+ if (length >= 64*1024*1024) {
34593+ length >>= PAGE_SHIFT;
34594+ flags |= 8;
34595+ }
34596+
34597+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34598+ gdt = get_cpu_gdt_table(cpu);
34599+ pack_descriptor(&d, address, length, 0x9b, flags);
34600+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34601+ pack_descriptor(&d, address, length, 0x93, flags);
34602+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34603+ }
34604+ return entry;
34605+ }
34606+ case 0x80: /* Not present */
34607+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34608+ return 0;
34609+ default: /* Shouldn't happen */
34610+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34611+ service, return_code);
34612+ return 0;
34613 }
34614 }
34615
34616 static struct {
34617 unsigned long address;
34618 unsigned short segment;
34619-} pci_indirect = { 0, __KERNEL_CS };
34620+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34621
34622-static int pci_bios_present;
34623+static int pci_bios_present __read_only;
34624
34625 static int __init check_pcibios(void)
34626 {
34627@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34628 unsigned long flags, pcibios_entry;
34629
34630 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34631- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34632+ pci_indirect.address = pcibios_entry;
34633
34634 local_irq_save(flags);
34635- __asm__(
34636- "lcall *(%%edi); cld\n\t"
34637+ __asm__("movw %w6, %%ds\n\t"
34638+ "lcall *%%ss:(%%edi); cld\n\t"
34639+ "push %%ss\n\t"
34640+ "pop %%ds\n\t"
34641 "jc 1f\n\t"
34642 "xor %%ah, %%ah\n"
34643 "1:"
34644@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34645 "=b" (ebx),
34646 "=c" (ecx)
34647 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34648- "D" (&pci_indirect)
34649+ "D" (&pci_indirect),
34650+ "r" (__PCIBIOS_DS)
34651 : "memory");
34652 local_irq_restore(flags);
34653
34654@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34655
34656 switch (len) {
34657 case 1:
34658- __asm__("lcall *(%%esi); cld\n\t"
34659+ __asm__("movw %w6, %%ds\n\t"
34660+ "lcall *%%ss:(%%esi); cld\n\t"
34661+ "push %%ss\n\t"
34662+ "pop %%ds\n\t"
34663 "jc 1f\n\t"
34664 "xor %%ah, %%ah\n"
34665 "1:"
34666@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34667 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34668 "b" (bx),
34669 "D" ((long)reg),
34670- "S" (&pci_indirect));
34671+ "S" (&pci_indirect),
34672+ "r" (__PCIBIOS_DS));
34673 /*
34674 * Zero-extend the result beyond 8 bits, do not trust the
34675 * BIOS having done it:
34676@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34677 *value &= 0xff;
34678 break;
34679 case 2:
34680- __asm__("lcall *(%%esi); cld\n\t"
34681+ __asm__("movw %w6, %%ds\n\t"
34682+ "lcall *%%ss:(%%esi); cld\n\t"
34683+ "push %%ss\n\t"
34684+ "pop %%ds\n\t"
34685 "jc 1f\n\t"
34686 "xor %%ah, %%ah\n"
34687 "1:"
34688@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34689 : "1" (PCIBIOS_READ_CONFIG_WORD),
34690 "b" (bx),
34691 "D" ((long)reg),
34692- "S" (&pci_indirect));
34693+ "S" (&pci_indirect),
34694+ "r" (__PCIBIOS_DS));
34695 /*
34696 * Zero-extend the result beyond 16 bits, do not trust the
34697 * BIOS having done it:
34698@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34699 *value &= 0xffff;
34700 break;
34701 case 4:
34702- __asm__("lcall *(%%esi); cld\n\t"
34703+ __asm__("movw %w6, %%ds\n\t"
34704+ "lcall *%%ss:(%%esi); cld\n\t"
34705+ "push %%ss\n\t"
34706+ "pop %%ds\n\t"
34707 "jc 1f\n\t"
34708 "xor %%ah, %%ah\n"
34709 "1:"
34710@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34711 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34712 "b" (bx),
34713 "D" ((long)reg),
34714- "S" (&pci_indirect));
34715+ "S" (&pci_indirect),
34716+ "r" (__PCIBIOS_DS));
34717 break;
34718 }
34719
34720@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34721
34722 switch (len) {
34723 case 1:
34724- __asm__("lcall *(%%esi); cld\n\t"
34725+ __asm__("movw %w6, %%ds\n\t"
34726+ "lcall *%%ss:(%%esi); cld\n\t"
34727+ "push %%ss\n\t"
34728+ "pop %%ds\n\t"
34729 "jc 1f\n\t"
34730 "xor %%ah, %%ah\n"
34731 "1:"
34732@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34733 "c" (value),
34734 "b" (bx),
34735 "D" ((long)reg),
34736- "S" (&pci_indirect));
34737+ "S" (&pci_indirect),
34738+ "r" (__PCIBIOS_DS));
34739 break;
34740 case 2:
34741- __asm__("lcall *(%%esi); cld\n\t"
34742+ __asm__("movw %w6, %%ds\n\t"
34743+ "lcall *%%ss:(%%esi); cld\n\t"
34744+ "push %%ss\n\t"
34745+ "pop %%ds\n\t"
34746 "jc 1f\n\t"
34747 "xor %%ah, %%ah\n"
34748 "1:"
34749@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34750 "c" (value),
34751 "b" (bx),
34752 "D" ((long)reg),
34753- "S" (&pci_indirect));
34754+ "S" (&pci_indirect),
34755+ "r" (__PCIBIOS_DS));
34756 break;
34757 case 4:
34758- __asm__("lcall *(%%esi); cld\n\t"
34759+ __asm__("movw %w6, %%ds\n\t"
34760+ "lcall *%%ss:(%%esi); cld\n\t"
34761+ "push %%ss\n\t"
34762+ "pop %%ds\n\t"
34763 "jc 1f\n\t"
34764 "xor %%ah, %%ah\n"
34765 "1:"
34766@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34767 "c" (value),
34768 "b" (bx),
34769 "D" ((long)reg),
34770- "S" (&pci_indirect));
34771+ "S" (&pci_indirect),
34772+ "r" (__PCIBIOS_DS));
34773 break;
34774 }
34775
34776@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34777
34778 DBG("PCI: Fetching IRQ routing table... ");
34779 __asm__("push %%es\n\t"
34780+ "movw %w8, %%ds\n\t"
34781 "push %%ds\n\t"
34782 "pop %%es\n\t"
34783- "lcall *(%%esi); cld\n\t"
34784+ "lcall *%%ss:(%%esi); cld\n\t"
34785 "pop %%es\n\t"
34786+ "push %%ss\n\t"
34787+ "pop %%ds\n"
34788 "jc 1f\n\t"
34789 "xor %%ah, %%ah\n"
34790 "1:"
34791@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34792 "1" (0),
34793 "D" ((long) &opt),
34794 "S" (&pci_indirect),
34795- "m" (opt)
34796+ "m" (opt),
34797+ "r" (__PCIBIOS_DS)
34798 : "memory");
34799 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34800 if (ret & 0xff00)
34801@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34802 {
34803 int ret;
34804
34805- __asm__("lcall *(%%esi); cld\n\t"
34806+ __asm__("movw %w5, %%ds\n\t"
34807+ "lcall *%%ss:(%%esi); cld\n\t"
34808+ "push %%ss\n\t"
34809+ "pop %%ds\n"
34810 "jc 1f\n\t"
34811 "xor %%ah, %%ah\n"
34812 "1:"
34813@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34814 : "0" (PCIBIOS_SET_PCI_HW_INT),
34815 "b" ((dev->bus->number << 8) | dev->devfn),
34816 "c" ((irq << 8) | (pin + 10)),
34817- "S" (&pci_indirect));
34818+ "S" (&pci_indirect),
34819+ "r" (__PCIBIOS_DS));
34820 return !(ret & 0xff00);
34821 }
34822 EXPORT_SYMBOL(pcibios_set_irq_routing);
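
The pcbios.c rewrite stops executing BIOS32/PCI BIOS code with the kernel's own flat segments: `bios32_service()` validates the advertised (address, length) pair, installs dedicated `__PCIBIOS_CS`/`__PCIBIOS_DS` descriptors covering just that region on every CPU, and each far call reloads `%ds` accordingly. One detail worth spelling out is the granularity handling: a descriptor limit is only 20 bits wide, so it counts bytes up to 1 MiB and must switch to 4 KiB-page units (the G flag, `flags |= 8` above) beyond that. A standalone sketch of that computation; `demo_limit` is our name, and the threshold here is the architectural 20-bit bound rather than the hunk's 64 MiB test:

#include <stdint.h>

/* Split a byte length into the 20-bit limit plus granularity flag an
 * x86 segment descriptor encodes: byte-granular while it fits,
 * page-granular (G=1) otherwise. */
static void demo_limit(uint32_t length, uint32_t *limit, uint8_t *flags)
{
	*flags = 0x4;			/* D bit: 32-bit segment */
	if (length > 0xFFFFF) {		/* too big for byte units */
		length >>= 12;		/* count 4 KiB pages */
		*flags |= 0x8;		/* G bit: page granularity */
	}
	*limit = length;
}
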
34823diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34824index 40e7cda..c7e6672 100644
34825--- a/arch/x86/platform/efi/efi_32.c
34826+++ b/arch/x86/platform/efi/efi_32.c
34827@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34828 {
34829 struct desc_ptr gdt_descr;
34830
34831+#ifdef CONFIG_PAX_KERNEXEC
34832+ struct desc_struct d;
34833+#endif
34834+
34835 local_irq_save(efi_rt_eflags);
34836
34837 load_cr3(initial_page_table);
34838 __flush_tlb_all();
34839
34840+#ifdef CONFIG_PAX_KERNEXEC
34841+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34842+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34843+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34844+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34845+#endif
34846+
34847 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34848 gdt_descr.size = GDT_SIZE - 1;
34849 load_gdt(&gdt_descr);
34850@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34851 {
34852 struct desc_ptr gdt_descr;
34853
34854+#ifdef CONFIG_PAX_KERNEXEC
34855+ struct desc_struct d;
34856+
34857+ memset(&d, 0, sizeof d);
34858+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34859+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34860+#endif
34861+
34862 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34863 gdt_descr.size = GDT_SIZE - 1;
34864 load_gdt(&gdt_descr);
34865
34866+#ifdef CONFIG_PAX_PER_CPU_PGD
34867+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34868+#else
34869 load_cr3(swapper_pg_dir);
34870+#endif
34871+
34872 __flush_tlb_all();
34873
34874 local_irq_restore(efi_rt_eflags);
34875diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34876index 17e80d8..9fa6e41 100644
34877--- a/arch/x86/platform/efi/efi_64.c
34878+++ b/arch/x86/platform/efi/efi_64.c
34879@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34880 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34881 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34882 }
34883+
34884+#ifdef CONFIG_PAX_PER_CPU_PGD
34885+ load_cr3(swapper_pg_dir);
34886+#endif
34887+
34888 __flush_tlb_all();
34889 }
34890
34891@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34892 for (pgd = 0; pgd < n_pgds; pgd++)
34893 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34894 kfree(save_pgd);
34895+
34896+#ifdef CONFIG_PAX_PER_CPU_PGD
34897+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34898+#endif
34899+
34900 __flush_tlb_all();
34901 local_irq_restore(efi_flags);
34902 early_code_mapping_set_exec(0);
34903@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34904 unsigned npages;
34905 pgd_t *pgd;
34906
34907- if (efi_enabled(EFI_OLD_MEMMAP))
34908+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34909+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34910+ * able to execute the EFI services.
34911+ */
34912+ if (__supported_pte_mask & _PAGE_NX) {
34913+ unsigned long addr = (unsigned long) __va(0);
34914+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34915+
34916+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34917+#ifdef CONFIG_PAX_PER_CPU_PGD
34918+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34919+#endif
34920+ set_pgd(pgd_offset_k(addr), pe);
34921+ }
34922+
34923 return 0;
34924+ }
34925
34926 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34927 pgd = __va(efi_scratch.efi_pgt);
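
With `efi=old_map` the firmware's runtime services execute through the low direct mapping, which PaX otherwise leaves non-executable; the hunk above therefore strips `_PAGE_NX` from the single PGD entry covering `__va(0)` (on the per-CPU PGD too, when that is in use) and warns loudly. A kernel-style sketch of clearing NX on one top-level entry; `demo_clear_nx` is illustrative:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Re-allow execution under one kernel PGD entry by clearing its NX
 * bit -- the escape hatch applied to the low identity map above. */
static void demo_clear_nx(unsigned long addr)
{
	pgd_t *p = pgd_offset_k(addr);

	set_pgd(p, __pgd(pgd_val(*p) & ~_PAGE_NX));
}
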
34928diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34929index 040192b..7d3300f 100644
34930--- a/arch/x86/platform/efi/efi_stub_32.S
34931+++ b/arch/x86/platform/efi/efi_stub_32.S
34932@@ -6,7 +6,9 @@
34933 */
34934
34935 #include <linux/linkage.h>
34936+#include <linux/init.h>
34937 #include <asm/page_types.h>
34938+#include <asm/segment.h>
34939
34940 /*
34941 * efi_call_phys(void *, ...) is a function with variable parameters.
34942@@ -20,7 +22,7 @@
34943 * service functions will comply with gcc calling convention, too.
34944 */
34945
34946-.text
34947+__INIT
34948 ENTRY(efi_call_phys)
34949 /*
34950 * 0. The function can only be called in Linux kernel. So CS has been
34951@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34952 * The mapping of lower virtual memory has been created in prolog and
34953 * epilog.
34954 */
34955- movl $1f, %edx
34956- subl $__PAGE_OFFSET, %edx
34957- jmp *%edx
34958+#ifdef CONFIG_PAX_KERNEXEC
34959+ movl $(__KERNEXEC_EFI_DS), %edx
34960+ mov %edx, %ds
34961+ mov %edx, %es
34962+ mov %edx, %ss
34963+ addl $2f,(1f)
34964+ ljmp *(1f)
34965+
34966+__INITDATA
34967+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34968+.previous
34969+
34970+2:
34971+ subl $2b,(1b)
34972+#else
34973+ jmp 1f-__PAGE_OFFSET
34974 1:
34975+#endif
34976
34977 /*
34978 * 2. Now on the top of stack is the return
34979@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34980 * parameter 2, ..., param n. To make things easy, we save the return
34981 * address of efi_call_phys in a global variable.
34982 */
34983- popl %edx
34984- movl %edx, saved_return_addr
34985- /* get the function pointer into ECX*/
34986- popl %ecx
34987- movl %ecx, efi_rt_function_ptr
34988- movl $2f, %edx
34989- subl $__PAGE_OFFSET, %edx
34990- pushl %edx
34991+ popl (saved_return_addr)
34992+ popl (efi_rt_function_ptr)
34993
34994 /*
34995 * 3. Clear PG bit in %CR0.
34996@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34997 /*
34998 * 5. Call the physical function.
34999 */
35000- jmp *%ecx
35001+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35002
35003-2:
35004 /*
35005 * 6. After EFI runtime service returns, control will return to
35006 * following instruction. We'd better readjust stack pointer first.
35007@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35008 movl %cr0, %edx
35009 orl $0x80000000, %edx
35010 movl %edx, %cr0
35011- jmp 1f
35012-1:
35013+
35014 /*
35015 * 8. Now restore the virtual mode from flat mode by
35016 * adding EIP with PAGE_OFFSET.
35017 */
35018- movl $1f, %edx
35019- jmp *%edx
35020+#ifdef CONFIG_PAX_KERNEXEC
35021+ movl $(__KERNEL_DS), %edx
35022+ mov %edx, %ds
35023+ mov %edx, %es
35024+ mov %edx, %ss
35025+ ljmp $(__KERNEL_CS),$1f
35026+#else
35027+ jmp 1f+__PAGE_OFFSET
35028+#endif
35029 1:
35030
35031 /*
35032 * 9. Balance the stack. And because EAX contain the return value,
35033 * we'd better not clobber it.
35034 */
35035- leal efi_rt_function_ptr, %edx
35036- movl (%edx), %ecx
35037- pushl %ecx
35038+ pushl (efi_rt_function_ptr)
35039
35040 /*
35041- * 10. Push the saved return address onto the stack and return.
35042+ * 10. Return to the saved return address.
35043 */
35044- leal saved_return_addr, %edx
35045- movl (%edx), %ecx
35046- pushl %ecx
35047- ret
35048+ jmpl *(saved_return_addr)
35049 ENDPROC(efi_call_phys)
35050 .previous
35051
35052-.data
35053+__INITDATA
35054 saved_return_addr:
35055 .long 0
35056 efi_rt_function_ptr:
35057diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35058index 86d0f9e..6d499f4 100644
35059--- a/arch/x86/platform/efi/efi_stub_64.S
35060+++ b/arch/x86/platform/efi/efi_stub_64.S
35061@@ -11,6 +11,7 @@
35062 #include <asm/msr.h>
35063 #include <asm/processor-flags.h>
35064 #include <asm/page_types.h>
35065+#include <asm/alternative-asm.h>
35066
35067 #define SAVE_XMM \
35068 mov %rsp, %rax; \
35069@@ -88,6 +89,7 @@ ENTRY(efi_call)
35070 RESTORE_PGT
35071 addq $48, %rsp
35072 RESTORE_XMM
35073+ pax_force_retaddr 0, 1
35074 ret
35075 ENDPROC(efi_call)
35076
35077diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35078index 3005f0c..d06aeb0 100644
35079--- a/arch/x86/platform/intel-mid/intel-mid.c
35080+++ b/arch/x86/platform/intel-mid/intel-mid.c
35081@@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
35082 /* intel_mid_ops to store sub arch ops */
35083 struct intel_mid_ops *intel_mid_ops;
35084 /* getter function for sub arch ops*/
35085-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35086+static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35087 enum intel_mid_cpu_type __intel_mid_cpu_chip;
35088 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
35089
35090@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35091 {
35092 };
35093
35094-static void intel_mid_reboot(void)
35095+static void __noreturn intel_mid_reboot(void)
35096 {
35097 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35098+ BUG();
35099 }
35100
35101 static unsigned long __init intel_mid_calibrate_tsc(void)
35102diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35103index 3c1c386..59a68ed 100644
35104--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35105+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35106@@ -13,6 +13,6 @@
35107 /* For every CPU addition a new get_<cpuname>_ops interface needs
35108 * to be added.
35109 */
35110-extern void *get_penwell_ops(void);
35111-extern void *get_cloverview_ops(void);
35112-extern void *get_tangier_ops(void);
35113+extern const void *get_penwell_ops(void);
35114+extern const void *get_cloverview_ops(void);
35115+extern const void *get_tangier_ops(void);
35116diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35117index 23381d2..8ddc10e 100644
35118--- a/arch/x86/platform/intel-mid/mfld.c
35119+++ b/arch/x86/platform/intel-mid/mfld.c
35120@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35121 pm_power_off = mfld_power_off;
35122 }
35123
35124-void *get_penwell_ops(void)
35125+const void *get_penwell_ops(void)
35126 {
35127 return &penwell_ops;
35128 }
35129
35130-void *get_cloverview_ops(void)
35131+const void *get_cloverview_ops(void)
35132 {
35133 return &penwell_ops;
35134 }
35135diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35136index aaca917..66eadbc 100644
35137--- a/arch/x86/platform/intel-mid/mrfl.c
35138+++ b/arch/x86/platform/intel-mid/mrfl.c
35139@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35140 .arch_setup = tangier_arch_setup,
35141 };
35142
35143-void *get_tangier_ops(void)
35144+const void *get_tangier_ops(void)
35145 {
35146 return &tangier_ops;
35147 }
35148diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
35149index c9a0838..fae0977 100644
35150--- a/arch/x86/platform/intel-quark/imr_selftest.c
35151+++ b/arch/x86/platform/intel-quark/imr_selftest.c
35152@@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
35153 */
35154 static void __init imr_self_test(void)
35155 {
35156- phys_addr_t base = virt_to_phys(&_text);
35157+ phys_addr_t base = virt_to_phys(ktla_ktva(_text));
35158 size_t size = virt_to_phys(&__end_rodata) - base;
35159 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
35160 int ret;
35161diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35162index d6ee929..3637cb5 100644
35163--- a/arch/x86/platform/olpc/olpc_dt.c
35164+++ b/arch/x86/platform/olpc/olpc_dt.c
35165@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35166 return res;
35167 }
35168
35169-static struct of_pdt_ops prom_olpc_ops __initdata = {
35170+static struct of_pdt_ops prom_olpc_ops __initconst = {
35171 .nextprop = olpc_dt_nextprop,
35172 .getproplen = olpc_dt_getproplen,
35173 .getproperty = olpc_dt_getproperty,
35174diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35175index 3e32ed5..cc0adc5 100644
35176--- a/arch/x86/power/cpu.c
35177+++ b/arch/x86/power/cpu.c
35178@@ -134,11 +134,8 @@ static void do_fpu_end(void)
35179 static void fix_processor_context(void)
35180 {
35181 int cpu = smp_processor_id();
35182- struct tss_struct *t = &per_cpu(init_tss, cpu);
35183-#ifdef CONFIG_X86_64
35184- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35185- tss_desc tss;
35186-#endif
35187+ struct tss_struct *t = init_tss + cpu;
35188+
35189 set_tss_desc(cpu, t); /*
35190 * This just modifies memory; should not be
35191 * necessary. But... This is necessary, because
35192@@ -147,10 +144,6 @@ static void fix_processor_context(void)
35193 */
35194
35195 #ifdef CONFIG_X86_64
35196- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35197- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35198- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35199-
35200 syscall_init(); /* This sets MSR_*STAR and related */
35201 #endif
35202 load_TR_desc(); /* This does ltr */
35203diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35204index 0b7a63d..0d0f2c2 100644
35205--- a/arch/x86/realmode/init.c
35206+++ b/arch/x86/realmode/init.c
35207@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35208 __va(real_mode_header->trampoline_header);
35209
35210 #ifdef CONFIG_X86_32
35211- trampoline_header->start = __pa_symbol(startup_32_smp);
35212+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35213+
35214+#ifdef CONFIG_PAX_KERNEXEC
35215+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35216+#endif
35217+
35218+ trampoline_header->boot_cs = __BOOT_CS;
35219 trampoline_header->gdt_limit = __BOOT_DS + 7;
35220 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35221 #else
35222@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35223 *trampoline_cr4_features = __read_cr4();
35224
35225 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35226- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35227+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35228 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35229 #endif
35230 }
35231diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35232index 2730d77..2e4cd19 100644
35233--- a/arch/x86/realmode/rm/Makefile
35234+++ b/arch/x86/realmode/rm/Makefile
35235@@ -68,5 +68,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35236
35237 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35238 -I$(srctree)/arch/x86/boot
35239+ifdef CONSTIFY_PLUGIN
35240+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35241+endif
35242 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35243 GCOV_PROFILE := n
35244diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35245index a28221d..93c40f1 100644
35246--- a/arch/x86/realmode/rm/header.S
35247+++ b/arch/x86/realmode/rm/header.S
35248@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35249 #endif
35250 /* APM/BIOS reboot */
35251 .long pa_machine_real_restart_asm
35252-#ifdef CONFIG_X86_64
35253+#ifdef CONFIG_X86_32
35254+ .long __KERNEL_CS
35255+#else
35256 .long __KERNEL32_CS
35257 #endif
35258 END(real_mode_header)
35259diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
35260index d66c607..3def845 100644
35261--- a/arch/x86/realmode/rm/reboot.S
35262+++ b/arch/x86/realmode/rm/reboot.S
35263@@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm)
35264 lgdtl pa_tr_gdt
35265
35266 /* Disable paging to drop us out of long mode */
35267+ movl %cr4, %eax
35268+ andl $~X86_CR4_PCIDE, %eax
35269+ movl %eax, %cr4
35270+
35271 movl %cr0, %eax
35272 andl $~X86_CR0_PG, %eax
35273 movl %eax, %cr0
35274diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35275index 48ddd76..c26749f 100644
35276--- a/arch/x86/realmode/rm/trampoline_32.S
35277+++ b/arch/x86/realmode/rm/trampoline_32.S
35278@@ -24,6 +24,12 @@
35279 #include <asm/page_types.h>
35280 #include "realmode.h"
35281
35282+#ifdef CONFIG_PAX_KERNEXEC
35283+#define ta(X) (X)
35284+#else
35285+#define ta(X) (pa_ ## X)
35286+#endif
35287+
35288 .text
35289 .code16
35290
35291@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35292
35293 cli # We should be safe anyway
35294
35295- movl tr_start, %eax # where we need to go
35296-
35297 movl $0xA5A5A5A5, trampoline_status
35298 # write marker for master knows we're running
35299
35300@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35301 movw $1, %dx # protected mode (PE) bit
35302 lmsw %dx # into protected mode
35303
35304- ljmpl $__BOOT_CS, $pa_startup_32
35305+ ljmpl *(trampoline_header)
35306
35307 .section ".text32","ax"
35308 .code32
35309@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35310 .balign 8
35311 GLOBAL(trampoline_header)
35312 tr_start: .space 4
35313- tr_gdt_pad: .space 2
35314+ tr_boot_cs: .space 2
35315 tr_gdt: .space 6
35316 END(trampoline_header)
35317
35318diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35319index dac7b20..72dbaca 100644
35320--- a/arch/x86/realmode/rm/trampoline_64.S
35321+++ b/arch/x86/realmode/rm/trampoline_64.S
35322@@ -93,6 +93,7 @@ ENTRY(startup_32)
35323 movl %edx, %gs
35324
35325 movl pa_tr_cr4, %eax
35326+ andl $~X86_CR4_PCIDE, %eax
35327 movl %eax, %cr4 # Enable PAE mode
35328
35329 # Setup trampoline 4 level pagetables
35330@@ -106,7 +107,7 @@ ENTRY(startup_32)
35331 wrmsr
35332
35333 # Enable paging and in turn activate Long Mode
35334- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35335+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35336 movl %eax, %cr0
35337
35338 /*
35339diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35340index 9e7e147..25a4158 100644
35341--- a/arch/x86/realmode/rm/wakeup_asm.S
35342+++ b/arch/x86/realmode/rm/wakeup_asm.S
35343@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35344 lgdtl pmode_gdt
35345
35346 /* This really couldn't... */
35347- movl pmode_entry, %eax
35348 movl pmode_cr0, %ecx
35349 movl %ecx, %cr0
35350- ljmpl $__KERNEL_CS, $pa_startup_32
35351- /* -> jmp *%eax in trampoline_32.S */
35352+
35353+ ljmpl *pmode_entry
35354 #else
35355 jmp trampoline_start
35356 #endif
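
Both realmode hunks above (reboot.S and trampoline_64.S) mask `X86_CR4_PCIDE` out of CR4 before touching the paging bits, which UDEREF's PCID use makes necessary: per the SDM, clearing CR0.PG while CR4.PCIDE is set raises #GP, and PCIDE may only be enabled once long-mode paging is already active with a sane PCID field in CR3. The teardown ordering, expressed as a kernel-style C sketch (the real code is assembly; helper availability varies by kernel version):

#include <asm/tlbflush.h>		/* cr4_clear_bits() */
#include <asm/special_insns.h>		/* read_cr0()/write_cr0() */
#include <asm/processor-flags.h>

/* Leave paging in the architecturally legal order: PCID off first,
 * then CR0.PG, never the other way around. */
static void demo_leave_paging(void)
{
	cr4_clear_bits(X86_CR4_PCIDE);			/* step 1 */
	write_cr0(read_cr0() & ~X86_CR0_PG);		/* step 2 */
}
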
35357diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35358index 604a37e..e49702a 100644
35359--- a/arch/x86/tools/Makefile
35360+++ b/arch/x86/tools/Makefile
35361@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35362
35363 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35364
35365-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35366+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35367 hostprogs-y += relocs
35368 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35369 PHONY += relocs
35370diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35371index 0c2fae8..88036b7 100644
35372--- a/arch/x86/tools/relocs.c
35373+++ b/arch/x86/tools/relocs.c
35374@@ -1,5 +1,7 @@
35375 /* This is included from relocs_32/64.c */
35376
35377+#include "../../../include/generated/autoconf.h"
35378+
35379 #define ElfW(type) _ElfW(ELF_BITS, type)
35380 #define _ElfW(bits, type) __ElfW(bits, type)
35381 #define __ElfW(bits, type) Elf##bits##_##type
35382@@ -11,6 +13,7 @@
35383 #define Elf_Sym ElfW(Sym)
35384
35385 static Elf_Ehdr ehdr;
35386+static Elf_Phdr *phdr;
35387
35388 struct relocs {
35389 uint32_t *offset;
35390@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35391 }
35392 }
35393
35394+static void read_phdrs(FILE *fp)
35395+{
35396+ unsigned int i;
35397+
35398+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35399+ if (!phdr) {
35400+ die("Unable to allocate %d program headers\n",
35401+ ehdr.e_phnum);
35402+ }
35403+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35404+ die("Seek to %d failed: %s\n",
35405+ ehdr.e_phoff, strerror(errno));
35406+ }
35407+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35408+ die("Cannot read ELF program headers: %s\n",
35409+ strerror(errno));
35410+ }
35411+ for(i = 0; i < ehdr.e_phnum; i++) {
35412+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35413+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35414+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35415+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35416+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35417+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35418+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35419+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35420+ }
35421+
35422+}
35423+
35424 static void read_shdrs(FILE *fp)
35425 {
35426- int i;
35427+ unsigned int i;
35428 Elf_Shdr shdr;
35429
35430 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35431@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35432
35433 static void read_strtabs(FILE *fp)
35434 {
35435- int i;
35436+ unsigned int i;
35437 for (i = 0; i < ehdr.e_shnum; i++) {
35438 struct section *sec = &secs[i];
35439 if (sec->shdr.sh_type != SHT_STRTAB) {
35440@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35441
35442 static void read_symtabs(FILE *fp)
35443 {
35444- int i,j;
35445+ unsigned int i,j;
35446 for (i = 0; i < ehdr.e_shnum; i++) {
35447 struct section *sec = &secs[i];
35448 if (sec->shdr.sh_type != SHT_SYMTAB) {
35449@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35450 }
35451
35452
35453-static void read_relocs(FILE *fp)
35454+static void read_relocs(FILE *fp, int use_real_mode)
35455 {
35456- int i,j;
35457+ unsigned int i,j;
35458+ uint32_t base;
35459+
35460 for (i = 0; i < ehdr.e_shnum; i++) {
35461 struct section *sec = &secs[i];
35462 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35463@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35464 die("Cannot read symbol table: %s\n",
35465 strerror(errno));
35466 }
35467+ base = 0;
35468+
35469+#ifdef CONFIG_X86_32
35470+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35471+ if (phdr[j].p_type != PT_LOAD )
35472+ continue;
35473+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35474+ continue;
35475+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35476+ break;
35477+ }
35478+#endif
35479+
35480 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35481 Elf_Rel *rel = &sec->reltab[j];
35482- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35483+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35484 rel->r_info = elf_xword_to_cpu(rel->r_info);
35485 #if (SHT_REL_TYPE == SHT_RELA)
35486 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35487@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35488
35489 static void print_absolute_symbols(void)
35490 {
35491- int i;
35492+ unsigned int i;
35493 const char *format;
35494
35495 if (ELF_BITS == 64)
35496@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35497 for (i = 0; i < ehdr.e_shnum; i++) {
35498 struct section *sec = &secs[i];
35499 char *sym_strtab;
35500- int j;
35501+ unsigned int j;
35502
35503 if (sec->shdr.sh_type != SHT_SYMTAB) {
35504 continue;
35505@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35506
35507 static void print_absolute_relocs(void)
35508 {
35509- int i, printed = 0;
35510+ unsigned int i, printed = 0;
35511 const char *format;
35512
35513 if (ELF_BITS == 64)
35514@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35515 struct section *sec_applies, *sec_symtab;
35516 char *sym_strtab;
35517 Elf_Sym *sh_symtab;
35518- int j;
35519+ unsigned int j;
35520 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35521 continue;
35522 }
35523@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35524 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35525 Elf_Sym *sym, const char *symname))
35526 {
35527- int i;
35528+ unsigned int i;
35529 /* Walk through the relocations */
35530 for (i = 0; i < ehdr.e_shnum; i++) {
35531 char *sym_strtab;
35532 Elf_Sym *sh_symtab;
35533 struct section *sec_applies, *sec_symtab;
35534- int j;
35535+ unsigned int j;
35536 struct section *sec = &secs[i];
35537
35538 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35539@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35540 {
35541 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35542 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35543+ char *sym_strtab = sec->link->link->strtab;
35544+
35545+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35546+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35547+ return 0;
35548+
35549+#ifdef CONFIG_PAX_KERNEXEC
35550+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35551+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35552+ return 0;
35553+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35554+ return 0;
35555+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35556+ return 0;
35557+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35558+ return 0;
35559+#endif
35560
35561 switch (r_type) {
35562 case R_386_NONE:
35563@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35564
35565 static void emit_relocs(int as_text, int use_real_mode)
35566 {
35567- int i;
35568+ unsigned int i;
35569 int (*write_reloc)(uint32_t, FILE *) = write32;
35570 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35571 const char *symname);
35572@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35573 {
35574 regex_init(use_real_mode);
35575 read_ehdr(fp);
35576+ read_phdrs(fp);
35577 read_shdrs(fp);
35578 read_strtabs(fp);
35579 read_symtabs(fp);
35580- read_relocs(fp);
35581+ read_relocs(fp, use_real_mode);
35582 if (ELF_BITS == 64)
35583 percpu_init();
35584 if (show_absolute_syms) {
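
Note on the relocs.c changes above: read_phdrs() loads the ELF program headers so that, on 32-bit kernels with this patch's KERNEXEC layout, each relocation's file-relative r_offset can be rebased to the virtual address the kernel actually runs at; per-cpu symbols and the KERNEXEC-managed text sections are exempted because they are segment-relative indices or are relocated implicitly through the KERNEL_CS base. A sketch of the lookup the new loop performs, assuming the 32-bit ELF types and with page_offset standing in for CONFIG_PAGE_OFFSET:

    #include <elf.h>      /* Elf32_Phdr, Elf32_Off, PT_LOAD */
    #include <stdint.h>

    /* find the PT_LOAD segment backing a section's file offset and
     * return the shift into the kernel's virtual address space */
    static uint32_t reloc_base(Elf32_Off sh_offset, const Elf32_Phdr *ph,
                               unsigned int e_phnum, uint32_t page_offset)
    {
        unsigned int j;

        for (j = 0; j < e_phnum; j++) {
            if (ph[j].p_type != PT_LOAD)
                continue;
            if (sh_offset < ph[j].p_offset ||
                sh_offset >= ph[j].p_offset + ph[j].p_filesz)
                continue;
            return page_offset + ph[j].p_paddr - ph[j].p_vaddr;
        }
        return 0;   /* not backed by any PT_LOAD: leave r_offset alone */
    }
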
35585diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35586index f40281e..92728c9 100644
35587--- a/arch/x86/um/mem_32.c
35588+++ b/arch/x86/um/mem_32.c
35589@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35590 gate_vma.vm_start = FIXADDR_USER_START;
35591 gate_vma.vm_end = FIXADDR_USER_END;
35592 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35593- gate_vma.vm_page_prot = __P101;
35594+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35595
35596 return 0;
35597 }
35598diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35599index 80ffa5b..a33bd15 100644
35600--- a/arch/x86/um/tls_32.c
35601+++ b/arch/x86/um/tls_32.c
35602@@ -260,7 +260,7 @@ out:
35603 if (unlikely(task == current &&
35604 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35605 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35606- "without flushed TLS.", current->pid);
35607+ "without flushed TLS.", task_pid_nr(current));
35608 }
35609
35610 return 0;
35611diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35612index 7b9be98..39bb57f 100644
35613--- a/arch/x86/vdso/Makefile
35614+++ b/arch/x86/vdso/Makefile
35615@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
35616 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35617 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35618
35619-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35620+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35621 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35622 GCOV_PROFILE := n
35623
35624diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35625index 0224987..c7d65a5 100644
35626--- a/arch/x86/vdso/vdso2c.h
35627+++ b/arch/x86/vdso/vdso2c.h
35628@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35629 unsigned long load_size = -1; /* Work around bogus warning */
35630 unsigned long mapping_size;
35631 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35632- int i;
35633+ unsigned int i;
35634 unsigned long j;
35635 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35636 *alt_sec = NULL;
35637diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35638index e904c27..b9eaa03 100644
35639--- a/arch/x86/vdso/vdso32-setup.c
35640+++ b/arch/x86/vdso/vdso32-setup.c
35641@@ -14,6 +14,7 @@
35642 #include <asm/cpufeature.h>
35643 #include <asm/processor.h>
35644 #include <asm/vdso.h>
35645+#include <asm/mman.h>
35646
35647 #ifdef CONFIG_COMPAT_VDSO
35648 #define VDSO_DEFAULT 0
35649diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35650index 1c9f750..cfddb1a 100644
35651--- a/arch/x86/vdso/vma.c
35652+++ b/arch/x86/vdso/vma.c
35653@@ -19,10 +19,7 @@
35654 #include <asm/page.h>
35655 #include <asm/hpet.h>
35656 #include <asm/desc.h>
35657-
35658-#if defined(CONFIG_X86_64)
35659-unsigned int __read_mostly vdso64_enabled = 1;
35660-#endif
35661+#include <asm/mman.h>
35662
35663 void __init init_vdso_image(const struct vdso_image *image)
35664 {
35665@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35666 .pages = no_pages,
35667 };
35668
35669+#ifdef CONFIG_PAX_RANDMMAP
35670+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35671+ calculate_addr = false;
35672+#endif
35673+
35674 if (calculate_addr) {
35675 addr = vdso_addr(current->mm->start_stack,
35676 image->size - image->sym_vvar_start);
35677@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35678 down_write(&mm->mmap_sem);
35679
35680 addr = get_unmapped_area(NULL, addr,
35681- image->size - image->sym_vvar_start, 0, 0);
35682+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35683 if (IS_ERR_VALUE(addr)) {
35684 ret = addr;
35685 goto up_fail;
35686 }
35687
35688 text_start = addr - image->sym_vvar_start;
35689- current->mm->context.vdso = (void __user *)text_start;
35690+ mm->context.vdso = text_start;
35691
35692 /*
35693 * MAYWRITE to allow gdb to COW and set breakpoints
35694@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35695 hpet_address >> PAGE_SHIFT,
35696 PAGE_SIZE,
35697 pgprot_noncached(PAGE_READONLY));
35698-
35699- if (ret)
35700- goto up_fail;
35701 }
35702 #endif
35703
35704 up_fail:
35705 if (ret)
35706- current->mm->context.vdso = NULL;
35707+ current->mm->context.vdso = 0;
35708
35709 up_write(&mm->mmap_sem);
35710 return ret;
35711@@ -191,8 +190,8 @@ static int load_vdso32(void)
35712
35713 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35714 current_thread_info()->sysenter_return =
35715- current->mm->context.vdso +
35716- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35717+ (void __force_user *)(current->mm->context.vdso +
35718+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35719
35720 return 0;
35721 }
35722@@ -201,9 +200,6 @@ static int load_vdso32(void)
35723 #ifdef CONFIG_X86_64
35724 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35725 {
35726- if (!vdso64_enabled)
35727- return 0;
35728-
35729 return map_vdso(&vdso_image_64, true);
35730 }
35731
35732@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35733 int uses_interp)
35734 {
35735 #ifdef CONFIG_X86_X32_ABI
35736- if (test_thread_flag(TIF_X32)) {
35737- if (!vdso64_enabled)
35738- return 0;
35739-
35740+ if (test_thread_flag(TIF_X32))
35741 return map_vdso(&vdso_image_x32, true);
35742- }
35743 #endif
35744
35745 return load_vdso32();
35746@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35747 #endif
35748
35749 #ifdef CONFIG_X86_64
35750-static __init int vdso_setup(char *s)
35751-{
35752- vdso64_enabled = simple_strtoul(s, NULL, 0);
35753- return 0;
35754-}
35755-__setup("vdso=", vdso_setup);
35756-#endif
35757-
35758-#ifdef CONFIG_X86_64
35759 static void vgetcpu_cpu_init(void *arg)
35760 {
35761 int cpu = smp_processor_id();
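
Note on the vma.c hunks above: two things change. With MF_PAX_RANDMMAP set, the vdso no longer takes the stack-relative vdso_addr() hint but lets get_unmapped_area() place it under the randomized mmap base; and the vdso64_enabled switch with its "vdso=" parameter is removed, so the mapping can no longer be turned off. The placement decision reduces to roughly the following, where size stands in for image->size - image->sym_vvar_start:

    bool calculate_addr = true;              /* upstream default            */
    if (mm->pax_flags & MF_PAX_RANDMMAP)
        calculate_addr = false;              /* defer to randomized mmap    */
    addr = calculate_addr
         ? vdso_addr(mm->start_stack, size)  /* legacy stack-relative hint  */
         : 0;                                /* 0: let the allocator choose */
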
35762diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35763index e88fda8..76ce7ce 100644
35764--- a/arch/x86/xen/Kconfig
35765+++ b/arch/x86/xen/Kconfig
35766@@ -9,6 +9,7 @@ config XEN
35767 select XEN_HAVE_PVMMU
35768 depends on X86_64 || (X86_32 && X86_PAE)
35769 depends on X86_TSC
35770+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35771 help
35772 This is the Linux Xen port. Enabling this will allow the
35773 kernel to boot in a paravirtualized environment under the
35774diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35775index 5240f56..0c12163 100644
35776--- a/arch/x86/xen/enlighten.c
35777+++ b/arch/x86/xen/enlighten.c
35778@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35779
35780 struct shared_info xen_dummy_shared_info;
35781
35782-void *xen_initial_gdt;
35783-
35784 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35785 __read_mostly int xen_have_vector_callback;
35786 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35787@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35788 {
35789 unsigned long va = dtr->address;
35790 unsigned int size = dtr->size + 1;
35791- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35792- unsigned long frames[pages];
35793+ unsigned long frames[65536 / PAGE_SIZE];
35794 int f;
35795
35796 /*
35797@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35798 {
35799 unsigned long va = dtr->address;
35800 unsigned int size = dtr->size + 1;
35801- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35802- unsigned long frames[pages];
35803+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35804 int f;
35805
35806 /*
35807@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35808 * 8-byte entries, or 16 4k pages..
35809 */
35810
35811- BUG_ON(size > 65536);
35812+ BUG_ON(size > GDT_SIZE);
35813 BUG_ON(va & ~PAGE_MASK);
35814
35815 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35816@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35817 return 0;
35818 }
35819
35820-static void set_xen_basic_apic_ops(void)
35821+static void __init set_xen_basic_apic_ops(void)
35822 {
35823 apic->read = xen_apic_read;
35824 apic->write = xen_apic_write;
35825@@ -1308,30 +1304,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35826 #endif
35827 };
35828
35829-static void xen_reboot(int reason)
35830+static __noreturn void xen_reboot(int reason)
35831 {
35832 struct sched_shutdown r = { .reason = reason };
35833
35834- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35835- BUG();
35836+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35837+ BUG();
35838 }
35839
35840-static void xen_restart(char *msg)
35841+static __noreturn void xen_restart(char *msg)
35842 {
35843 xen_reboot(SHUTDOWN_reboot);
35844 }
35845
35846-static void xen_emergency_restart(void)
35847+static __noreturn void xen_emergency_restart(void)
35848 {
35849 xen_reboot(SHUTDOWN_reboot);
35850 }
35851
35852-static void xen_machine_halt(void)
35853+static __noreturn void xen_machine_halt(void)
35854 {
35855 xen_reboot(SHUTDOWN_poweroff);
35856 }
35857
35858-static void xen_machine_power_off(void)
35859+static __noreturn void xen_machine_power_off(void)
35860 {
35861 if (pm_power_off)
35862 pm_power_off();
35863@@ -1484,8 +1480,11 @@ static void __ref xen_setup_gdt(int cpu)
35864 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35865 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35866
35867- setup_stack_canary_segment(0);
35868- switch_to_new_gdt(0);
35869+ setup_stack_canary_segment(cpu);
35870+#ifdef CONFIG_X86_64
35871+ load_percpu_segment(cpu);
35872+#endif
35873+ switch_to_new_gdt(cpu);
35874
35875 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35876 pv_cpu_ops.load_gdt = xen_load_gdt;
35877@@ -1600,7 +1599,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35878 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35879
35880 /* Work out if we support NX */
35881- x86_configure_nx();
35882+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35883+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35884+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35885+ unsigned l, h;
35886+
35887+ __supported_pte_mask |= _PAGE_NX;
35888+ rdmsr(MSR_EFER, l, h);
35889+ l |= EFER_NX;
35890+ wrmsr(MSR_EFER, l, h);
35891+ }
35892+#endif
35893
35894 /* Get mfn list */
35895 xen_build_dynamic_phys_to_machine();
35896@@ -1628,13 +1637,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35897
35898 machine_ops = xen_machine_ops;
35899
35900- /*
35901- * The only reliable way to retain the initial address of the
35902- * percpu gdt_page is to remember it here, so we can go and
35903- * mark it RW later, when the initial percpu area is freed.
35904- */
35905- xen_initial_gdt = &per_cpu(gdt_page, 0);
35906-
35907 xen_smp_init();
35908
35909 #ifdef CONFIG_ACPI_NUMA
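
Note on the enlighten.c changes above: the NX setup is open-coded instead of calling x86_configure_nx(), apparently so that NX is forced on whenever the CPU advertises it and cannot be disabled by boot-time policy; the feature is probed straight from CPUID leaf 0x80000001 and EFER.NX is set directly. Compactly:

    /* probe NX via CPUID and force it on (64-bit / PAE only) */
    if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
        (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
        u32 lo, hi;

        __supported_pte_mask |= _PAGE_NX;   /* allow NX in PTEs           */
        rdmsr(MSR_EFER, lo, hi);
        wrmsr(MSR_EFER, lo | EFER_NX, hi);  /* enable no-execute globally */
    }

The same file also replaces the two runtime-sized frames[] arrays with worst-case fixed arrays (a GDT is at most GDT_SIZE bytes), avoiding variable-length arrays on the kernel stack.
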
35910diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35911index adca9e2..cdba9d1 100644
35912--- a/arch/x86/xen/mmu.c
35913+++ b/arch/x86/xen/mmu.c
35914@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35915 return val;
35916 }
35917
35918-static pteval_t pte_pfn_to_mfn(pteval_t val)
35919+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35920 {
35921 if (val & _PAGE_PRESENT) {
35922 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35923@@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35924 * L3_k[511] -> level2_fixmap_pgt */
35925 convert_pfn_mfn(level3_kernel_pgt);
35926
35927+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35928+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35929+ convert_pfn_mfn(level3_vmemmap_pgt);
35930 /* L3_k[511][506] -> level1_fixmap_pgt */
35931+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35932 convert_pfn_mfn(level2_fixmap_pgt);
35933 }
35934 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35935@@ -1860,11 +1864,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35936 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35937 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35938 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35939+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35940+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35941+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35942 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35943 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35944+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35945 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35946 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35947- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35948+ set_page_prot(level1_fixmap_pgt[0], PAGE_KERNEL_RO);
35949+ set_page_prot(level1_fixmap_pgt[1], PAGE_KERNEL_RO);
35950+ set_page_prot(level1_fixmap_pgt[2], PAGE_KERNEL_RO);
35951+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35952
35953 /* Pin down new L4 */
35954 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35955@@ -2048,6 +2059,7 @@ static void __init xen_post_allocator_init(void)
35956 pv_mmu_ops.set_pud = xen_set_pud;
35957 #if PAGETABLE_LEVELS == 4
35958 pv_mmu_ops.set_pgd = xen_set_pgd;
35959+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35960 #endif
35961
35962 /* This will work as long as patching hasn't happened yet
35963@@ -2126,6 +2138,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35964 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35965 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35966 .set_pgd = xen_set_pgd_hyper,
35967+ .set_pgd_batched = xen_set_pgd_hyper,
35968
35969 .alloc_pud = xen_alloc_pmd_init,
35970 .release_pud = xen_release_pmd_init,
35971diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35972index 08e8489..b1e182f 100644
35973--- a/arch/x86/xen/smp.c
35974+++ b/arch/x86/xen/smp.c
35975@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35976
35977 if (xen_pv_domain()) {
35978 if (!xen_feature(XENFEAT_writable_page_tables))
35979- /* We've switched to the "real" per-cpu gdt, so make
35980- * sure the old memory can be recycled. */
35981- make_lowmem_page_readwrite(xen_initial_gdt);
35982-
35983 #ifdef CONFIG_X86_32
35984 /*
35985 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35986 * expects __USER_DS
35987 */
35988- loadsegment(ds, __USER_DS);
35989- loadsegment(es, __USER_DS);
35990+ loadsegment(ds, __KERNEL_DS);
35991+ loadsegment(es, __KERNEL_DS);
35992 #endif
35993
35994 xen_filter_cpu_maps();
35995@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35996 #ifdef CONFIG_X86_32
35997 /* Note: PVH is not yet supported on x86_32. */
35998 ctxt->user_regs.fs = __KERNEL_PERCPU;
35999- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
36000+ savesegment(gs, ctxt->user_regs.gs);
36001 #endif
36002 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
36003
36004@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36005 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36006 ctxt->flags = VGCF_IN_KERNEL;
36007 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36008- ctxt->user_regs.ds = __USER_DS;
36009- ctxt->user_regs.es = __USER_DS;
36010+ ctxt->user_regs.ds = __KERNEL_DS;
36011+ ctxt->user_regs.es = __KERNEL_DS;
36012 ctxt->user_regs.ss = __KERNEL_DS;
36013
36014 xen_copy_trap_info(ctxt->trap_ctxt);
36015@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
36016 int rc;
36017
36018 per_cpu(current_task, cpu) = idle;
36019+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
36020 #ifdef CONFIG_X86_32
36021 irq_ctx_init(cpu);
36022 #else
36023 clear_tsk_thread_flag(idle, TIF_FORK);
36024 #endif
36025- per_cpu(kernel_stack, cpu) =
36026- (unsigned long)task_stack_page(idle) -
36027- KERNEL_STACK_OFFSET + THREAD_SIZE;
36028+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36029
36030 xen_setup_runstate_info(cpu);
36031 xen_setup_timer(cpu);
36032@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36033
36034 void __init xen_smp_init(void)
36035 {
36036- smp_ops = xen_smp_ops;
36037+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36038 xen_fill_possible_map();
36039 }
36040
36041diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36042index fd92a64..1f72641 100644
36043--- a/arch/x86/xen/xen-asm_32.S
36044+++ b/arch/x86/xen/xen-asm_32.S
36045@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36046 pushw %fs
36047 movl $(__KERNEL_PERCPU), %eax
36048 movl %eax, %fs
36049- movl %fs:xen_vcpu, %eax
36050+ mov PER_CPU_VAR(xen_vcpu), %eax
36051 POP_FS
36052 #else
36053 movl %ss:xen_vcpu, %eax
36054diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36055index 674b2225..f1f5dc1 100644
36056--- a/arch/x86/xen/xen-head.S
36057+++ b/arch/x86/xen/xen-head.S
36058@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36059 #ifdef CONFIG_X86_32
36060 mov %esi,xen_start_info
36061 mov $init_thread_union+THREAD_SIZE,%esp
36062+#ifdef CONFIG_SMP
36063+ movl $cpu_gdt_table,%edi
36064+ movl $__per_cpu_load,%eax
36065+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36066+ rorl $16,%eax
36067+ movb %al,__KERNEL_PERCPU + 4(%edi)
36068+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36069+ movl $__per_cpu_end - 1,%eax
36070+ subl $__per_cpu_start,%eax
36071+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36072+#endif
36073 #else
36074 mov %rsi,xen_start_info
36075 mov $init_thread_union+THREAD_SIZE,%rsp
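
Note on the assembly added to startup_xen above: it hand-patches the __KERNEL_PERCPU descriptor in cpu_gdt_table so that segment-based per-cpu accesses work before the regular GDT setup runs. The per-cpu load address is spliced into the descriptor's split base fields and the section length minus one into the limit. In C terms, the stores land as follows (d points at the 8-byte descriptor; this is a sketch of the layout, not kernel code):

    #include <stdint.h>

    static void set_base_limit(unsigned char *d, uint32_t base, uint16_t limit)
    {
        d[0] = limit;           /* limit  7:0  <- movw %ax, 0(%edi)       */
        d[1] = limit >> 8;      /* limit 15:8                             */
        d[2] = base;            /* base   7:0  <- movw %ax, 2(%edi)       */
        d[3] = base >> 8;       /* base  15:8                             */
        d[4] = base >> 16;      /* base  23:16 <- movb %al after rorl $16 */
        d[7] = base >> 24;      /* base  31:24 <- movb %ah, 7(%edi)       */
    }
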
36076diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36077index 9e195c6..523ed36 100644
36078--- a/arch/x86/xen/xen-ops.h
36079+++ b/arch/x86/xen/xen-ops.h
36080@@ -16,8 +16,6 @@ void xen_syscall_target(void);
36081 void xen_syscall32_target(void);
36082 #endif
36083
36084-extern void *xen_initial_gdt;
36085-
36086 struct trap_info;
36087 void xen_copy_trap_info(struct trap_info *traps);
36088
36089diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36090index 525bd3d..ef888b1 100644
36091--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36092+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36093@@ -119,9 +119,9 @@
36094 ----------------------------------------------------------------------*/
36095
36096 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36097-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36098 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36099 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36100+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36101
36102 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36103 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36104diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36105index 2f33760..835e50a 100644
36106--- a/arch/xtensa/variants/fsf/include/variant/core.h
36107+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36108@@ -11,6 +11,7 @@
36109 #ifndef _XTENSA_CORE_H
36110 #define _XTENSA_CORE_H
36111
36112+#include <linux/const.h>
36113
36114 /****************************************************************************
36115 Parameters Useful for Any Code, USER or PRIVILEGED
36116@@ -112,9 +113,9 @@
36117 ----------------------------------------------------------------------*/
36118
36119 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36120-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36121 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36122 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36123+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36124
36125 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36126 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
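
Note on the two xtensa variant headers above: the D-cache line size is now derived from its log2 width instead of being duplicated, so the two constants cannot drift apart, and _AC(1,UL) (hence the new <linux/const.h> include) makes the result an unsigned long that remains usable from assembly:

    /* 1UL << 4 == 16 for the fsf core, 1UL << 5 == 32 for dc232b */
    #define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)
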
36127diff --git a/block/bio.c b/block/bio.c
36128index f66a4ea..73ddf55 100644
36129--- a/block/bio.c
36130+++ b/block/bio.c
36131@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36132 /*
36133 * Overflow, abort
36134 */
36135- if (end < start)
36136+ if (end < start || end - start > INT_MAX - nr_pages)
36137 return ERR_PTR(-EINVAL);
36138
36139 nr_pages += end - start;
36140@@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
36141 /*
36142 * Overflow, abort
36143 */
36144- if (end < start)
36145+ if (end < start || end - start > INT_MAX - nr_pages)
36146 return ERR_PTR(-EINVAL);
36147
36148 nr_pages += end - start;
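
Note on the two bio.c guards above: they are strengthened from a pure wrap check to one that also bounds the running total. end and start are page indices, and a hostile iovec could previously keep end >= start on every iteration while still pushing the int accumulator nr_pages past INT_MAX. As a self-contained helper in kernel context, the check is:

    /* sketch: accumulate a page range into an int without overflow */
    static int add_page_range(int *nr_pages, unsigned long start,
                              unsigned long end)
    {
        if (end < start)                        /* wrapped range            */
            return -EINVAL;
        if (end - start > INT_MAX - *nr_pages)  /* sum would exceed INT_MAX */
            return -EINVAL;
        *nr_pages += end - start;
        return 0;
    }
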
36149diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36150index 0736729..2ec3b48 100644
36151--- a/block/blk-iopoll.c
36152+++ b/block/blk-iopoll.c
36153@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36154 }
36155 EXPORT_SYMBOL(blk_iopoll_complete);
36156
36157-static void blk_iopoll_softirq(struct softirq_action *h)
36158+static __latent_entropy void blk_iopoll_softirq(void)
36159 {
36160 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36161 int rearm = 0, budget = blk_iopoll_budget;
36162diff --git a/block/blk-map.c b/block/blk-map.c
36163index b8d2725..08c52b0 100644
36164--- a/block/blk-map.c
36165+++ b/block/blk-map.c
36166@@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36167 if (!len || !kbuf)
36168 return -EINVAL;
36169
36170- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36171+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36172 if (do_copy)
36173 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36174 else
36175diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36176index 53b1737..08177d2e 100644
36177--- a/block/blk-softirq.c
36178+++ b/block/blk-softirq.c
36179@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36180 * Softirq action handler - move entries to local list and loop over them
36181 * while passing them to the queue registered handler.
36182 */
36183-static void blk_done_softirq(struct softirq_action *h)
36184+static __latent_entropy void blk_done_softirq(void)
36185 {
36186 struct list_head *cpu_list, local_list;
36187
36188diff --git a/block/bsg.c b/block/bsg.c
36189index d214e92..9649863 100644
36190--- a/block/bsg.c
36191+++ b/block/bsg.c
36192@@ -140,16 +140,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36193 struct sg_io_v4 *hdr, struct bsg_device *bd,
36194 fmode_t has_write_perm)
36195 {
36196+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36197+ unsigned char *cmdptr;
36198+
36199 if (hdr->request_len > BLK_MAX_CDB) {
36200 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36201 if (!rq->cmd)
36202 return -ENOMEM;
36203- }
36204+ cmdptr = rq->cmd;
36205+ } else
36206+ cmdptr = tmpcmd;
36207
36208- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36209+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36210 hdr->request_len))
36211 return -EFAULT;
36212
36213+ if (cmdptr != rq->cmd)
36214+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36215+
36216 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36217 if (blk_verify_command(rq->cmd, has_write_perm))
36218 return -EPERM;
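
Note on the bsg.c change above: it introduces a bounce buffer for the SCSI CDB copy. Short commands are copied from user space into a stack-local tmpcmd first and only committed into rq->cmd afterwards, so the user copy never targets the embedded rq->__cmd array inside the request structure, which the patch's PAX_USERCOPY checks would otherwise appear to have to special-case. The same pattern is applied twice in the block/scsi_ioctl.c hunks further down; shown here in the scsi_ioctl form, with ucmd and len as stand-ins for the user pointer and command length:

    unsigned char tmpcmd[sizeof(rq->__cmd)];
    unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

    if (copy_from_user(cmdptr, ucmd, len))
        return -EFAULT;                  /* rq->__cmd never half-written */
    if (cmdptr != rq->cmd)
        memcpy(rq->cmd, cmdptr, len);    /* commit after a full copy     */
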
36219diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36220index f678c73..f35aa18 100644
36221--- a/block/compat_ioctl.c
36222+++ b/block/compat_ioctl.c
36223@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36224 cgc = compat_alloc_user_space(sizeof(*cgc));
36225 cgc32 = compat_ptr(arg);
36226
36227- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36228+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36229 get_user(data, &cgc32->buffer) ||
36230 put_user(compat_ptr(data), &cgc->buffer) ||
36231 copy_in_user(&cgc->buflen, &cgc32->buflen,
36232@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36233 err |= __get_user(f->spec1, &uf->spec1);
36234 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36235 err |= __get_user(name, &uf->name);
36236- f->name = compat_ptr(name);
36237+ f->name = (void __force_kernel *)compat_ptr(name);
36238 if (err) {
36239 err = -EFAULT;
36240 goto out;
36241diff --git a/block/genhd.c b/block/genhd.c
36242index 0a536dc..b8f7aca 100644
36243--- a/block/genhd.c
36244+++ b/block/genhd.c
36245@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36246
36247 /*
36248 * Register device numbers dev..(dev+range-1)
36249- * range must be nonzero
36250+ * Noop if @range is zero.
36251 * The hash chain is sorted on range, so that subranges can override.
36252 */
36253 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36254 struct kobject *(*probe)(dev_t, int *, void *),
36255 int (*lock)(dev_t, void *), void *data)
36256 {
36257- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36258+ if (range)
36259+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36260 }
36261
36262 EXPORT_SYMBOL(blk_register_region);
36263
36264+/* undo blk_register_region(), noop if @range is zero */
36265 void blk_unregister_region(dev_t devt, unsigned long range)
36266 {
36267- kobj_unmap(bdev_map, devt, range);
36268+ if (range)
36269+ kobj_unmap(bdev_map, devt, range);
36270 }
36271
36272 EXPORT_SYMBOL(blk_unregister_region);
36273diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36274index 26cb624..a49c3a5 100644
36275--- a/block/partitions/efi.c
36276+++ b/block/partitions/efi.c
36277@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36278 if (!gpt)
36279 return NULL;
36280
36281+ if (!le32_to_cpu(gpt->num_partition_entries))
36282+ return NULL;
36283+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36284+ if (!pte)
36285+ return NULL;
36286+
36287 count = le32_to_cpu(gpt->num_partition_entries) *
36288 le32_to_cpu(gpt->sizeof_partition_entry);
36289- if (!count)
36290- return NULL;
36291- pte = kmalloc(count, GFP_KERNEL);
36292- if (!pte)
36293- return NULL;
36294-
36295 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36296 (u8 *) pte, count) < count) {
36297 kfree(pte);
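
Note on alloc_read_gpt_entries() above: it moves from kmalloc(n * size) to kcalloc(n, size). Both num_partition_entries and sizeof_partition_entry come from the on-disk GPT header, so the 32-bit product could wrap and yield an undersized buffer that the subsequent read_lba() then overruns. kcalloc() performs the multiplication with an overflow check; nents and entry_size stand in for the two le32 header fields:

    pte = kcalloc(nents, entry_size, GFP_KERNEL);  /* NULL if n*size wraps */
    if (!pte)
        return NULL;
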
36298diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36299index e1f71c3..02d295a 100644
36300--- a/block/scsi_ioctl.c
36301+++ b/block/scsi_ioctl.c
36302@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36303 return put_user(0, p);
36304 }
36305
36306-static int sg_get_timeout(struct request_queue *q)
36307+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36308 {
36309 return jiffies_to_clock_t(q->sg_timeout);
36310 }
36311@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36312 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36313 struct sg_io_hdr *hdr, fmode_t mode)
36314 {
36315- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36316+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36317+ unsigned char *cmdptr;
36318+
36319+ if (rq->cmd != rq->__cmd)
36320+ cmdptr = rq->cmd;
36321+ else
36322+ cmdptr = tmpcmd;
36323+
36324+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36325 return -EFAULT;
36326+
36327+ if (cmdptr != rq->cmd)
36328+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36329+
36330 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36331 return -EPERM;
36332
36333@@ -422,6 +434,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36334 int err;
36335 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36336 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36337+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36338+ unsigned char *cmdptr;
36339
36340 if (!sic)
36341 return -EINVAL;
36342@@ -460,9 +474,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36343 */
36344 err = -EFAULT;
36345 rq->cmd_len = cmdlen;
36346- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36347+
36348+ if (rq->cmd != rq->__cmd)
36349+ cmdptr = rq->cmd;
36350+ else
36351+ cmdptr = tmpcmd;
36352+
36353+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36354 goto error;
36355
36356+ if (rq->cmd != cmdptr)
36357+ memcpy(rq->cmd, cmdptr, cmdlen);
36358+
36359 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36360 goto error;
36361
36362diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36363index 650afac1..f3307de 100644
36364--- a/crypto/cryptd.c
36365+++ b/crypto/cryptd.c
36366@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36367
36368 struct cryptd_blkcipher_request_ctx {
36369 crypto_completion_t complete;
36370-};
36371+} __no_const;
36372
36373 struct cryptd_hash_ctx {
36374 struct crypto_shash *child;
36375@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36376
36377 struct cryptd_aead_request_ctx {
36378 crypto_completion_t complete;
36379-};
36380+} __no_const;
36381
36382 static void cryptd_queue_worker(struct work_struct *work);
36383
36384diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36385index c305d41..a96de79 100644
36386--- a/crypto/pcrypt.c
36387+++ b/crypto/pcrypt.c
36388@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36389 int ret;
36390
36391 pinst->kobj.kset = pcrypt_kset;
36392- ret = kobject_add(&pinst->kobj, NULL, name);
36393+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36394 if (!ret)
36395 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36396
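Note on the pcrypt.c change above: kobject_add() takes a printf-style format string, so passing the caller-supplied name directly would let any '%' sequence in it be interpreted. Pinning the format to "%s" is the standard fix:

    ret = kobject_add(&pinst->kobj, NULL, "%s", name);  /* safe           */
    /* ret = kobject_add(&pinst->kobj, NULL, name);        a '%' in name
     *                                                     would be parsed */
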
36397diff --git a/crypto/zlib.c b/crypto/zlib.c
36398index 0eefa9d..0fa3d29 100644
36399--- a/crypto/zlib.c
36400+++ b/crypto/zlib.c
36401@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36402 zlib_comp_exit(ctx);
36403
36404 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36405- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36406+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36407 : MAX_WBITS;
36408 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36409- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36410+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36411 : DEF_MEM_LEVEL;
36412
36413 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
36414diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36415index 3b37676..898edfa 100644
36416--- a/drivers/acpi/acpica/hwxfsleep.c
36417+++ b/drivers/acpi/acpica/hwxfsleep.c
36418@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36419 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36420
36421 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36422- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36423- acpi_hw_extended_sleep},
36424- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36425- acpi_hw_extended_wake_prep},
36426- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36427+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36428+ .extended_function = acpi_hw_extended_sleep},
36429+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36430+ .extended_function = acpi_hw_extended_wake_prep},
36431+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36432+ .extended_function = acpi_hw_extended_wake}
36433 };
36434
36435 /*
36436diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36437index 16129c7..8b675cd 100644
36438--- a/drivers/acpi/apei/apei-internal.h
36439+++ b/drivers/acpi/apei/apei-internal.h
36440@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36441 struct apei_exec_ins_type {
36442 u32 flags;
36443 apei_exec_ins_func_t run;
36444-};
36445+} __do_const;
36446
36447 struct apei_exec_context {
36448 u32 ip;
36449diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36450index e82d097..0c855c1 100644
36451--- a/drivers/acpi/apei/ghes.c
36452+++ b/drivers/acpi/apei/ghes.c
36453@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36454 const struct acpi_hest_generic *generic,
36455 const struct acpi_hest_generic_status *estatus)
36456 {
36457- static atomic_t seqno;
36458+ static atomic_unchecked_t seqno;
36459 unsigned int curr_seqno;
36460 char pfx_seq[64];
36461
36462@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36463 else
36464 pfx = KERN_ERR;
36465 }
36466- curr_seqno = atomic_inc_return(&seqno);
36467+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36468 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36469 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36470 pfx_seq, generic->header.source_id);
36471diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36472index a83e3c6..c3d617f 100644
36473--- a/drivers/acpi/bgrt.c
36474+++ b/drivers/acpi/bgrt.c
36475@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36476 if (!bgrt_image)
36477 return -ENODEV;
36478
36479- bin_attr_image.private = bgrt_image;
36480- bin_attr_image.size = bgrt_image_size;
36481+ pax_open_kernel();
36482+ *(void **)&bin_attr_image.private = bgrt_image;
36483+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36484+ pax_close_kernel();
36485
36486 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36487 if (!bgrt_kobj)
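
Note on bgrt_init() above: it writes through casts inside a pax_open_kernel()/pax_close_kernel() pair, the patch's idiom for one-time initialization of data that KERNEXEC keeps read-only. The pair briefly opens a kernel write window and the casts strip the compile-time const; the same idiom appears in the libata-core.c and pata_arasan_cf.c hunks below. Schematically, with image and image_sz as stand-ins for the values being installed:

    pax_open_kernel();                          /* open write window       */
    *(void **)&bin_attr_image.private = image;  /* cast defeats const      */
    *(size_t *)&bin_attr_image.size = image_sz; /* size fixed up likewise  */
    pax_close_kernel();                         /* re-protect kernel data  */
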
36488diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36489index 9b693d5..8953d54 100644
36490--- a/drivers/acpi/blacklist.c
36491+++ b/drivers/acpi/blacklist.c
36492@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36493 u32 is_critical_error;
36494 };
36495
36496-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36497+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36498
36499 /*
36500 * POLICY: If *anything* doesn't work, put it on the blacklist.
36501@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36502 return 0;
36503 }
36504
36505-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36506+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36507 {
36508 .callback = dmi_disable_osi_vista,
36509 .ident = "Fujitsu Siemens",
36510diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
36511index 8b67bd0..b59593e 100644
36512--- a/drivers/acpi/bus.c
36513+++ b/drivers/acpi/bus.c
36514@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
36515 }
36516 #endif
36517
36518-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36519+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36520 /*
36521 * Invoke DSDT corruption work-around on all Toshiba Satellite.
36522 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
36523@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36524 {}
36525 };
36526 #else
36527-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36528+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36529 {}
36530 };
36531 #endif
36532diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36533index c68e724..e863008 100644
36534--- a/drivers/acpi/custom_method.c
36535+++ b/drivers/acpi/custom_method.c
36536@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36537 struct acpi_table_header table;
36538 acpi_status status;
36539
36540+#ifdef CONFIG_GRKERNSEC_KMEM
36541+ return -EPERM;
36542+#endif
36543+
36544 if (!(*ppos)) {
36545 /* parse the table header to get the table length */
36546 if (count <= sizeof(struct acpi_table_header))
36547diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36548index 735db11..91e07ff 100644
36549--- a/drivers/acpi/device_pm.c
36550+++ b/drivers/acpi/device_pm.c
36551@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36552
36553 #endif /* CONFIG_PM_SLEEP */
36554
36555+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36556+
36557 static struct dev_pm_domain acpi_general_pm_domain = {
36558 .ops = {
36559 .runtime_suspend = acpi_subsys_runtime_suspend,
36560@@ -1041,6 +1043,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36561 .restore_early = acpi_subsys_resume_early,
36562 #endif
36563 },
36564+ .detach = acpi_dev_pm_detach
36565 };
36566
36567 /**
36568@@ -1110,7 +1113,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36569 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36570 }
36571
36572- dev->pm_domain->detach = acpi_dev_pm_detach;
36573 return 0;
36574 }
36575 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36576diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
36577index a8dd2f7..e15950e 100644
36578--- a/drivers/acpi/ec.c
36579+++ b/drivers/acpi/ec.c
36580@@ -1242,7 +1242,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
36581 return 0;
36582 }
36583
36584-static struct dmi_system_id ec_dmi_table[] __initdata = {
36585+static const struct dmi_system_id ec_dmi_table[] __initconst = {
36586 {
36587 ec_skip_dsdt_scan, "Compal JFL92", {
36588 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
36589diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
36590index 139d9e4..9a9d799 100644
36591--- a/drivers/acpi/pci_slot.c
36592+++ b/drivers/acpi/pci_slot.c
36593@@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
36594 return 0;
36595 }
36596
36597-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
36598+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
36599 /*
36600 * Fujitsu Primequest machines will return 1023 to indicate an
36601 * error if the _SUN method is evaluated on SxFy objects that
36602diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
36603index d9f7158..168e742 100644
36604--- a/drivers/acpi/processor_driver.c
36605+++ b/drivers/acpi/processor_driver.c
36606@@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
36607 return NOTIFY_OK;
36608 }
36609
36610-static struct notifier_block __refdata acpi_cpu_notifier = {
36611+static struct notifier_block __refconst acpi_cpu_notifier = {
36612 .notifier_call = acpi_cpu_soft_notify,
36613 };
36614
36615diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36616index f98db0b..8309c83 100644
36617--- a/drivers/acpi/processor_idle.c
36618+++ b/drivers/acpi/processor_idle.c
36619@@ -912,7 +912,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36620 {
36621 int i, count = CPUIDLE_DRIVER_STATE_START;
36622 struct acpi_processor_cx *cx;
36623- struct cpuidle_state *state;
36624+ cpuidle_state_no_const *state;
36625 struct cpuidle_driver *drv = &acpi_idle_driver;
36626
36627 if (!pr->flags.power_setup_done)
36628diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
36629index e5dd808..1eceed1 100644
36630--- a/drivers/acpi/processor_pdc.c
36631+++ b/drivers/acpi/processor_pdc.c
36632@@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
36633 return 0;
36634 }
36635
36636-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
36637+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
36638 {
36639 set_no_mwait, "Extensa 5220", {
36640 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
36641diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
36642index 7f251dd..47b262c 100644
36643--- a/drivers/acpi/sleep.c
36644+++ b/drivers/acpi/sleep.c
36645@@ -148,7 +148,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
36646 return 0;
36647 }
36648
36649-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
36650+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
36651 {
36652 .callback = init_old_suspend_ordering,
36653 .ident = "Abit KN9 (nForce4 variant)",
36654diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36655index 13e577c..cef11ee 100644
36656--- a/drivers/acpi/sysfs.c
36657+++ b/drivers/acpi/sysfs.c
36658@@ -423,11 +423,11 @@ static u32 num_counters;
36659 static struct attribute **all_attrs;
36660 static u32 acpi_gpe_count;
36661
36662-static struct attribute_group interrupt_stats_attr_group = {
36663+static attribute_group_no_const interrupt_stats_attr_group = {
36664 .name = "interrupts",
36665 };
36666
36667-static struct kobj_attribute *counter_attrs;
36668+static kobj_attribute_no_const *counter_attrs;
36669
36670 static void delete_gpe_attr_array(void)
36671 {
36672diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
36673index d24fa19..782f1e6 100644
36674--- a/drivers/acpi/thermal.c
36675+++ b/drivers/acpi/thermal.c
36676@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
36677 return 0;
36678 }
36679
36680-static struct dmi_system_id thermal_dmi_table[] __initdata = {
36681+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
36682 /*
36683 * Award BIOS on this AOpen makes thermal control almost worthless.
36684 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
36685diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
36686index 26eb70c..4d66ddf 100644
36687--- a/drivers/acpi/video.c
36688+++ b/drivers/acpi/video.c
36689@@ -418,7 +418,7 @@ static int __init video_disable_native_backlight(const struct dmi_system_id *d)
36690 return 0;
36691 }
36692
36693-static struct dmi_system_id video_dmi_table[] __initdata = {
36694+static const struct dmi_system_id video_dmi_table[] __initconst = {
36695 /*
36696 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
36697 */
36698diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36699index 61a9c07..ea98fa1 100644
36700--- a/drivers/ata/libahci.c
36701+++ b/drivers/ata/libahci.c
36702@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36703 }
36704 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36705
36706-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36707+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36708 struct ata_taskfile *tf, int is_cmd, u16 flags,
36709 unsigned long timeout_msec)
36710 {
36711diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36712index 23dac3b..89ada44 100644
36713--- a/drivers/ata/libata-core.c
36714+++ b/drivers/ata/libata-core.c
36715@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36716 static void ata_dev_xfermask(struct ata_device *dev);
36717 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36718
36719-atomic_t ata_print_id = ATOMIC_INIT(0);
36720+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36721
36722 struct ata_force_param {
36723 const char *name;
36724@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36725 struct ata_port *ap;
36726 unsigned int tag;
36727
36728- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36729+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36730 ap = qc->ap;
36731
36732 qc->flags = 0;
36733@@ -4797,7 +4797,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36734 struct ata_port *ap;
36735 struct ata_link *link;
36736
36737- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36738+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36739 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36740 ap = qc->ap;
36741 link = qc->dev->link;
36742@@ -5901,6 +5901,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36743 return;
36744
36745 spin_lock(&lock);
36746+ pax_open_kernel();
36747
36748 for (cur = ops->inherits; cur; cur = cur->inherits) {
36749 void **inherit = (void **)cur;
36750@@ -5914,8 +5915,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36751 if (IS_ERR(*pp))
36752 *pp = NULL;
36753
36754- ops->inherits = NULL;
36755+ *(struct ata_port_operations **)&ops->inherits = NULL;
36756
36757+ pax_close_kernel();
36758 spin_unlock(&lock);
36759 }
36760
36761@@ -6111,7 +6113,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36762
36763 /* give ports names and add SCSI hosts */
36764 for (i = 0; i < host->n_ports; i++) {
36765- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36766+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36767 host->ports[i]->local_port_no = i + 1;
36768 }
36769
36770diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36771index b061ba2..fdcd85f 100644
36772--- a/drivers/ata/libata-scsi.c
36773+++ b/drivers/ata/libata-scsi.c
36774@@ -4172,7 +4172,7 @@ int ata_sas_port_init(struct ata_port *ap)
36775
36776 if (rc)
36777 return rc;
36778- ap->print_id = atomic_inc_return(&ata_print_id);
36779+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36780 return 0;
36781 }
36782 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36783diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36784index f840ca1..edd6ef3 100644
36785--- a/drivers/ata/libata.h
36786+++ b/drivers/ata/libata.h
36787@@ -53,7 +53,7 @@ enum {
36788 ATA_DNXFER_QUIET = (1 << 31),
36789 };
36790
36791-extern atomic_t ata_print_id;
36792+extern atomic_unchecked_t ata_print_id;
36793 extern int atapi_passthru16;
36794 extern int libata_fua;
36795 extern int libata_noacpi;
36796diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36797index a9b0c82..207d97d 100644
36798--- a/drivers/ata/pata_arasan_cf.c
36799+++ b/drivers/ata/pata_arasan_cf.c
36800@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36801 /* Handle platform specific quirks */
36802 if (quirk) {
36803 if (quirk & CF_BROKEN_PIO) {
36804- ap->ops->set_piomode = NULL;
36805+ pax_open_kernel();
36806+ *(void **)&ap->ops->set_piomode = NULL;
36807+ pax_close_kernel();
36808 ap->pio_mask = 0;
36809 }
36810 if (quirk & CF_BROKEN_MWDMA)
36811diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36812index f9b983a..887b9d8 100644
36813--- a/drivers/atm/adummy.c
36814+++ b/drivers/atm/adummy.c
36815@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36816 vcc->pop(vcc, skb);
36817 else
36818 dev_kfree_skb_any(skb);
36819- atomic_inc(&vcc->stats->tx);
36820+ atomic_inc_unchecked(&vcc->stats->tx);
36821
36822 return 0;
36823 }
36824diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36825index f1a9198..f466a4a 100644
36826--- a/drivers/atm/ambassador.c
36827+++ b/drivers/atm/ambassador.c
36828@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36829 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36830
36831 // VC layer stats
36832- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36833+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36834
36835 // free the descriptor
36836 kfree (tx_descr);
36837@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36838 dump_skb ("<<<", vc, skb);
36839
36840 // VC layer stats
36841- atomic_inc(&atm_vcc->stats->rx);
36842+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36843 __net_timestamp(skb);
36844 // end of our responsibility
36845 atm_vcc->push (atm_vcc, skb);
36846@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36847 } else {
36848 PRINTK (KERN_INFO, "dropped over-size frame");
36849 // should we count this?
36850- atomic_inc(&atm_vcc->stats->rx_drop);
36851+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36852 }
36853
36854 } else {
36855@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36856 }
36857
36858 if (check_area (skb->data, skb->len)) {
36859- atomic_inc(&atm_vcc->stats->tx_err);
36860+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36861 return -ENOMEM; // ?
36862 }
36863
36864diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36865index 480fa6f..947067c 100644
36866--- a/drivers/atm/atmtcp.c
36867+++ b/drivers/atm/atmtcp.c
36868@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36869 if (vcc->pop) vcc->pop(vcc,skb);
36870 else dev_kfree_skb(skb);
36871 if (dev_data) return 0;
36872- atomic_inc(&vcc->stats->tx_err);
36873+ atomic_inc_unchecked(&vcc->stats->tx_err);
36874 return -ENOLINK;
36875 }
36876 size = skb->len+sizeof(struct atmtcp_hdr);
36877@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36878 if (!new_skb) {
36879 if (vcc->pop) vcc->pop(vcc,skb);
36880 else dev_kfree_skb(skb);
36881- atomic_inc(&vcc->stats->tx_err);
36882+ atomic_inc_unchecked(&vcc->stats->tx_err);
36883 return -ENOBUFS;
36884 }
36885 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36886@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36887 if (vcc->pop) vcc->pop(vcc,skb);
36888 else dev_kfree_skb(skb);
36889 out_vcc->push(out_vcc,new_skb);
36890- atomic_inc(&vcc->stats->tx);
36891- atomic_inc(&out_vcc->stats->rx);
36892+ atomic_inc_unchecked(&vcc->stats->tx);
36893+ atomic_inc_unchecked(&out_vcc->stats->rx);
36894 return 0;
36895 }
36896
36897@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36898 read_unlock(&vcc_sklist_lock);
36899 if (!out_vcc) {
36900 result = -EUNATCH;
36901- atomic_inc(&vcc->stats->tx_err);
36902+ atomic_inc_unchecked(&vcc->stats->tx_err);
36903 goto done;
36904 }
36905 skb_pull(skb,sizeof(struct atmtcp_hdr));
36906@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36907 __net_timestamp(new_skb);
36908 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36909 out_vcc->push(out_vcc,new_skb);
36910- atomic_inc(&vcc->stats->tx);
36911- atomic_inc(&out_vcc->stats->rx);
36912+ atomic_inc_unchecked(&vcc->stats->tx);
36913+ atomic_inc_unchecked(&out_vcc->stats->rx);
36914 done:
36915 if (vcc->pop) vcc->pop(vcc,skb);
36916 else dev_kfree_skb(skb);
36917diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36918index 6339efd..2b441d5 100644
36919--- a/drivers/atm/eni.c
36920+++ b/drivers/atm/eni.c
36921@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36922 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36923 vcc->dev->number);
36924 length = 0;
36925- atomic_inc(&vcc->stats->rx_err);
36926+ atomic_inc_unchecked(&vcc->stats->rx_err);
36927 }
36928 else {
36929 length = ATM_CELL_SIZE-1; /* no HEC */
36930@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36931 size);
36932 }
36933 eff = length = 0;
36934- atomic_inc(&vcc->stats->rx_err);
36935+ atomic_inc_unchecked(&vcc->stats->rx_err);
36936 }
36937 else {
36938 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36939@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36940 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36941 vcc->dev->number,vcc->vci,length,size << 2,descr);
36942 length = eff = 0;
36943- atomic_inc(&vcc->stats->rx_err);
36944+ atomic_inc_unchecked(&vcc->stats->rx_err);
36945 }
36946 }
36947 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36948@@ -770,7 +770,7 @@ rx_dequeued++;
36949 vcc->push(vcc,skb);
36950 pushed++;
36951 }
36952- atomic_inc(&vcc->stats->rx);
36953+ atomic_inc_unchecked(&vcc->stats->rx);
36954 }
36955 wake_up(&eni_dev->rx_wait);
36956 }
36957@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36958 DMA_TO_DEVICE);
36959 if (vcc->pop) vcc->pop(vcc,skb);
36960 else dev_kfree_skb_irq(skb);
36961- atomic_inc(&vcc->stats->tx);
36962+ atomic_inc_unchecked(&vcc->stats->tx);
36963 wake_up(&eni_dev->tx_wait);
36964 dma_complete++;
36965 }
36966diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36967index 82f2ae0..f205c02 100644
36968--- a/drivers/atm/firestream.c
36969+++ b/drivers/atm/firestream.c
36970@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36971 }
36972 }
36973
36974- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36975+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36976
36977 fs_dprintk (FS_DEBUG_TXMEM, "i");
36978 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36979@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36980 #endif
36981 skb_put (skb, qe->p1 & 0xffff);
36982 ATM_SKB(skb)->vcc = atm_vcc;
36983- atomic_inc(&atm_vcc->stats->rx);
36984+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36985 __net_timestamp(skb);
36986 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36987 atm_vcc->push (atm_vcc, skb);
36988@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36989 kfree (pe);
36990 }
36991 if (atm_vcc)
36992- atomic_inc(&atm_vcc->stats->rx_drop);
36993+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36994 break;
36995 case 0x1f: /* Reassembly abort: no buffers. */
36996 /* Silently increment error counter. */
36997 if (atm_vcc)
36998- atomic_inc(&atm_vcc->stats->rx_drop);
36999+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37000 break;
37001 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
37002 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
37003diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
37004index 75dde90..4309ead 100644
37005--- a/drivers/atm/fore200e.c
37006+++ b/drivers/atm/fore200e.c
37007@@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
37008 #endif
37009 /* check error condition */
37010 if (*entry->status & STATUS_ERROR)
37011- atomic_inc(&vcc->stats->tx_err);
37012+ atomic_inc_unchecked(&vcc->stats->tx_err);
37013 else
37014- atomic_inc(&vcc->stats->tx);
37015+ atomic_inc_unchecked(&vcc->stats->tx);
37016 }
37017 }
37018
37019@@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37020 if (skb == NULL) {
37021 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37022
37023- atomic_inc(&vcc->stats->rx_drop);
37024+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37025 return -ENOMEM;
37026 }
37027
37028@@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37029
37030 dev_kfree_skb_any(skb);
37031
37032- atomic_inc(&vcc->stats->rx_drop);
37033+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37034 return -ENOMEM;
37035 }
37036
37037 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37038
37039 vcc->push(vcc, skb);
37040- atomic_inc(&vcc->stats->rx);
37041+ atomic_inc_unchecked(&vcc->stats->rx);
37042
37043 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37044
37045@@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37046 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37047 fore200e->atm_dev->number,
37048 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37049- atomic_inc(&vcc->stats->rx_err);
37050+ atomic_inc_unchecked(&vcc->stats->rx_err);
37051 }
37052 }
37053
37054@@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37055 goto retry_here;
37056 }
37057
37058- atomic_inc(&vcc->stats->tx_err);
37059+ atomic_inc_unchecked(&vcc->stats->tx_err);
37060
37061 fore200e->tx_sat++;
37062 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37063diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37064index 93dca2e..c5daa69 100644
37065--- a/drivers/atm/he.c
37066+++ b/drivers/atm/he.c
37067@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37068
37069 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37070 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37071- atomic_inc(&vcc->stats->rx_drop);
37072+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37073 goto return_host_buffers;
37074 }
37075
37076@@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37077 RBRQ_LEN_ERR(he_dev->rbrq_head)
37078 ? "LEN_ERR" : "",
37079 vcc->vpi, vcc->vci);
37080- atomic_inc(&vcc->stats->rx_err);
37081+ atomic_inc_unchecked(&vcc->stats->rx_err);
37082 goto return_host_buffers;
37083 }
37084
37085@@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37086 vcc->push(vcc, skb);
37087 spin_lock(&he_dev->global_lock);
37088
37089- atomic_inc(&vcc->stats->rx);
37090+ atomic_inc_unchecked(&vcc->stats->rx);
37091
37092 return_host_buffers:
37093 ++pdus_assembled;
37094@@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37095 tpd->vcc->pop(tpd->vcc, tpd->skb);
37096 else
37097 dev_kfree_skb_any(tpd->skb);
37098- atomic_inc(&tpd->vcc->stats->tx_err);
37099+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37100 }
37101 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37102 return;
37103@@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37104 vcc->pop(vcc, skb);
37105 else
37106 dev_kfree_skb_any(skb);
37107- atomic_inc(&vcc->stats->tx_err);
37108+ atomic_inc_unchecked(&vcc->stats->tx_err);
37109 return -EINVAL;
37110 }
37111
37112@@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37113 vcc->pop(vcc, skb);
37114 else
37115 dev_kfree_skb_any(skb);
37116- atomic_inc(&vcc->stats->tx_err);
37117+ atomic_inc_unchecked(&vcc->stats->tx_err);
37118 return -EINVAL;
37119 }
37120 #endif
37121@@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37122 vcc->pop(vcc, skb);
37123 else
37124 dev_kfree_skb_any(skb);
37125- atomic_inc(&vcc->stats->tx_err);
37126+ atomic_inc_unchecked(&vcc->stats->tx_err);
37127 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37128 return -ENOMEM;
37129 }
37130@@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37131 vcc->pop(vcc, skb);
37132 else
37133 dev_kfree_skb_any(skb);
37134- atomic_inc(&vcc->stats->tx_err);
37135+ atomic_inc_unchecked(&vcc->stats->tx_err);
37136 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37137 return -ENOMEM;
37138 }
37139@@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37140 __enqueue_tpd(he_dev, tpd, cid);
37141 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37142
37143- atomic_inc(&vcc->stats->tx);
37144+ atomic_inc_unchecked(&vcc->stats->tx);
37145
37146 return 0;
37147 }
37148diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37149index 527bbd5..96570c8 100644
37150--- a/drivers/atm/horizon.c
37151+++ b/drivers/atm/horizon.c
37152@@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37153 {
37154 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37155 // VC layer stats
37156- atomic_inc(&vcc->stats->rx);
37157+ atomic_inc_unchecked(&vcc->stats->rx);
37158 __net_timestamp(skb);
37159 // end of our responsibility
37160 vcc->push (vcc, skb);
37161@@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37162 dev->tx_iovec = NULL;
37163
37164 // VC layer stats
37165- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37166+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37167
37168 // free the skb
37169 hrz_kfree_skb (skb);
37170diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37171index 074616b..d6b3d5f 100644
37172--- a/drivers/atm/idt77252.c
37173+++ b/drivers/atm/idt77252.c
37174@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37175 else
37176 dev_kfree_skb(skb);
37177
37178- atomic_inc(&vcc->stats->tx);
37179+ atomic_inc_unchecked(&vcc->stats->tx);
37180 }
37181
37182 atomic_dec(&scq->used);
37183@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37184 if ((sb = dev_alloc_skb(64)) == NULL) {
37185 printk("%s: Can't allocate buffers for aal0.\n",
37186 card->name);
37187- atomic_add(i, &vcc->stats->rx_drop);
37188+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37189 break;
37190 }
37191 if (!atm_charge(vcc, sb->truesize)) {
37192 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37193 card->name);
37194- atomic_add(i - 1, &vcc->stats->rx_drop);
37195+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37196 dev_kfree_skb(sb);
37197 break;
37198 }
37199@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37200 ATM_SKB(sb)->vcc = vcc;
37201 __net_timestamp(sb);
37202 vcc->push(vcc, sb);
37203- atomic_inc(&vcc->stats->rx);
37204+ atomic_inc_unchecked(&vcc->stats->rx);
37205
37206 cell += ATM_CELL_PAYLOAD;
37207 }
37208@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37209 "(CDC: %08x)\n",
37210 card->name, len, rpp->len, readl(SAR_REG_CDC));
37211 recycle_rx_pool_skb(card, rpp);
37212- atomic_inc(&vcc->stats->rx_err);
37213+ atomic_inc_unchecked(&vcc->stats->rx_err);
37214 return;
37215 }
37216 if (stat & SAR_RSQE_CRC) {
37217 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37218 recycle_rx_pool_skb(card, rpp);
37219- atomic_inc(&vcc->stats->rx_err);
37220+ atomic_inc_unchecked(&vcc->stats->rx_err);
37221 return;
37222 }
37223 if (skb_queue_len(&rpp->queue) > 1) {
37224@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37225 RXPRINTK("%s: Can't alloc RX skb.\n",
37226 card->name);
37227 recycle_rx_pool_skb(card, rpp);
37228- atomic_inc(&vcc->stats->rx_err);
37229+ atomic_inc_unchecked(&vcc->stats->rx_err);
37230 return;
37231 }
37232 if (!atm_charge(vcc, skb->truesize)) {
37233@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37234 __net_timestamp(skb);
37235
37236 vcc->push(vcc, skb);
37237- atomic_inc(&vcc->stats->rx);
37238+ atomic_inc_unchecked(&vcc->stats->rx);
37239
37240 return;
37241 }
37242@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37243 __net_timestamp(skb);
37244
37245 vcc->push(vcc, skb);
37246- atomic_inc(&vcc->stats->rx);
37247+ atomic_inc_unchecked(&vcc->stats->rx);
37248
37249 if (skb->truesize > SAR_FB_SIZE_3)
37250 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37251@@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37252 if (vcc->qos.aal != ATM_AAL0) {
37253 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37254 card->name, vpi, vci);
37255- atomic_inc(&vcc->stats->rx_drop);
37256+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37257 goto drop;
37258 }
37259
37260 if ((sb = dev_alloc_skb(64)) == NULL) {
37261 printk("%s: Can't allocate buffers for AAL0.\n",
37262 card->name);
37263- atomic_inc(&vcc->stats->rx_err);
37264+ atomic_inc_unchecked(&vcc->stats->rx_err);
37265 goto drop;
37266 }
37267
37268@@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37269 ATM_SKB(sb)->vcc = vcc;
37270 __net_timestamp(sb);
37271 vcc->push(vcc, sb);
37272- atomic_inc(&vcc->stats->rx);
37273+ atomic_inc_unchecked(&vcc->stats->rx);
37274
37275 drop:
37276 skb_pull(queue, 64);
37277@@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37278
37279 if (vc == NULL) {
37280 printk("%s: NULL connection in send().\n", card->name);
37281- atomic_inc(&vcc->stats->tx_err);
37282+ atomic_inc_unchecked(&vcc->stats->tx_err);
37283 dev_kfree_skb(skb);
37284 return -EINVAL;
37285 }
37286 if (!test_bit(VCF_TX, &vc->flags)) {
37287 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37288- atomic_inc(&vcc->stats->tx_err);
37289+ atomic_inc_unchecked(&vcc->stats->tx_err);
37290 dev_kfree_skb(skb);
37291 return -EINVAL;
37292 }
37293@@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37294 break;
37295 default:
37296 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37297- atomic_inc(&vcc->stats->tx_err);
37298+ atomic_inc_unchecked(&vcc->stats->tx_err);
37299 dev_kfree_skb(skb);
37300 return -EINVAL;
37301 }
37302
37303 if (skb_shinfo(skb)->nr_frags != 0) {
37304 printk("%s: No scatter-gather yet.\n", card->name);
37305- atomic_inc(&vcc->stats->tx_err);
37306+ atomic_inc_unchecked(&vcc->stats->tx_err);
37307 dev_kfree_skb(skb);
37308 return -EINVAL;
37309 }
37310@@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37311
37312 err = queue_skb(card, vc, skb, oam);
37313 if (err) {
37314- atomic_inc(&vcc->stats->tx_err);
37315+ atomic_inc_unchecked(&vcc->stats->tx_err);
37316 dev_kfree_skb(skb);
37317 return err;
37318 }
37319@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37320 skb = dev_alloc_skb(64);
37321 if (!skb) {
37322 printk("%s: Out of memory in send_oam().\n", card->name);
37323- atomic_inc(&vcc->stats->tx_err);
37324+ atomic_inc_unchecked(&vcc->stats->tx_err);
37325 return -ENOMEM;
37326 }
37327 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37328diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37329index 924f8e2..3375a3e 100644
37330--- a/drivers/atm/iphase.c
37331+++ b/drivers/atm/iphase.c
37332@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37333 status = (u_short) (buf_desc_ptr->desc_mode);
37334 if (status & (RX_CER | RX_PTE | RX_OFL))
37335 {
37336- atomic_inc(&vcc->stats->rx_err);
37337+ atomic_inc_unchecked(&vcc->stats->rx_err);
37338 IF_ERR(printk("IA: bad packet, dropping it");)
37339 if (status & RX_CER) {
37340 IF_ERR(printk(" cause: packet CRC error\n");)
37341@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37342 len = dma_addr - buf_addr;
37343 if (len > iadev->rx_buf_sz) {
37344 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37345- atomic_inc(&vcc->stats->rx_err);
37346+ atomic_inc_unchecked(&vcc->stats->rx_err);
37347 goto out_free_desc;
37348 }
37349
37350@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37351 ia_vcc = INPH_IA_VCC(vcc);
37352 if (ia_vcc == NULL)
37353 {
37354- atomic_inc(&vcc->stats->rx_err);
37355+ atomic_inc_unchecked(&vcc->stats->rx_err);
37356 atm_return(vcc, skb->truesize);
37357 dev_kfree_skb_any(skb);
37358 goto INCR_DLE;
37359@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37360 if ((length > iadev->rx_buf_sz) || (length >
37361 (skb->len - sizeof(struct cpcs_trailer))))
37362 {
37363- atomic_inc(&vcc->stats->rx_err);
37364+ atomic_inc_unchecked(&vcc->stats->rx_err);
37365 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37366 length, skb->len);)
37367 atm_return(vcc, skb->truesize);
37368@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37369
37370 IF_RX(printk("rx_dle_intr: skb push");)
37371 vcc->push(vcc,skb);
37372- atomic_inc(&vcc->stats->rx);
37373+ atomic_inc_unchecked(&vcc->stats->rx);
37374 iadev->rx_pkt_cnt++;
37375 }
37376 INCR_DLE:
37377@@ -2828,15 +2828,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37378 {
37379 struct k_sonet_stats *stats;
37380 stats = &PRIV(_ia_dev[board])->sonet_stats;
37381- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37382- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37383- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37384- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37385- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37386- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37387- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37388- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37389- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37390+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37391+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37392+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37393+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37394+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37395+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37396+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37397+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37398+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37399 }
37400 ia_cmds.status = 0;
37401 break;
37402@@ -2941,7 +2941,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37403 if ((desc == 0) || (desc > iadev->num_tx_desc))
37404 {
37405 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37406- atomic_inc(&vcc->stats->tx);
37407+ atomic_inc_unchecked(&vcc->stats->tx);
37408 if (vcc->pop)
37409 vcc->pop(vcc, skb);
37410 else
37411@@ -3046,14 +3046,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37412 ATM_DESC(skb) = vcc->vci;
37413 skb_queue_tail(&iadev->tx_dma_q, skb);
37414
37415- atomic_inc(&vcc->stats->tx);
37416+ atomic_inc_unchecked(&vcc->stats->tx);
37417 iadev->tx_pkt_cnt++;
37418 /* Increment transaction counter */
37419 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37420
37421 #if 0
37422 /* add flow control logic */
37423- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37424+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37425 if (iavcc->vc_desc_cnt > 10) {
37426 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37427 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37428diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37429index ce43ae3..969de38 100644
37430--- a/drivers/atm/lanai.c
37431+++ b/drivers/atm/lanai.c
37432@@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37433 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37434 lanai_endtx(lanai, lvcc);
37435 lanai_free_skb(lvcc->tx.atmvcc, skb);
37436- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37437+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37438 }
37439
37440 /* Try to fill the buffer - don't call unless there is backlog */
37441@@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37442 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37443 __net_timestamp(skb);
37444 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37445- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37446+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37447 out:
37448 lvcc->rx.buf.ptr = end;
37449 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37450@@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37451 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37452 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37453 lanai->stats.service_rxnotaal5++;
37454- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37455+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37456 return 0;
37457 }
37458 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37459@@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37460 int bytes;
37461 read_unlock(&vcc_sklist_lock);
37462 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37463- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37464+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37465 lvcc->stats.x.aal5.service_trash++;
37466 bytes = (SERVICE_GET_END(s) * 16) -
37467 (((unsigned long) lvcc->rx.buf.ptr) -
37468@@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37469 }
37470 if (s & SERVICE_STREAM) {
37471 read_unlock(&vcc_sklist_lock);
37472- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37473+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37474 lvcc->stats.x.aal5.service_stream++;
37475 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37476 "PDU on VCI %d!\n", lanai->number, vci);
37477@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37478 return 0;
37479 }
37480 DPRINTK("got rx crc error on vci %d\n", vci);
37481- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37482+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37483 lvcc->stats.x.aal5.service_rxcrc++;
37484 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37485 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37486diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37487index b7e1cc0..eb336bfe 100644
37488--- a/drivers/atm/nicstar.c
37489+++ b/drivers/atm/nicstar.c
37490@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37491 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37492 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37493 card->index);
37494- atomic_inc(&vcc->stats->tx_err);
37495+ atomic_inc_unchecked(&vcc->stats->tx_err);
37496 dev_kfree_skb_any(skb);
37497 return -EINVAL;
37498 }
37499@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37500 if (!vc->tx) {
37501 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37502 card->index);
37503- atomic_inc(&vcc->stats->tx_err);
37504+ atomic_inc_unchecked(&vcc->stats->tx_err);
37505 dev_kfree_skb_any(skb);
37506 return -EINVAL;
37507 }
37508@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37509 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37510 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37511 card->index);
37512- atomic_inc(&vcc->stats->tx_err);
37513+ atomic_inc_unchecked(&vcc->stats->tx_err);
37514 dev_kfree_skb_any(skb);
37515 return -EINVAL;
37516 }
37517
37518 if (skb_shinfo(skb)->nr_frags != 0) {
37519 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37520- atomic_inc(&vcc->stats->tx_err);
37521+ atomic_inc_unchecked(&vcc->stats->tx_err);
37522 dev_kfree_skb_any(skb);
37523 return -EINVAL;
37524 }
37525@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37526 }
37527
37528 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37529- atomic_inc(&vcc->stats->tx_err);
37530+ atomic_inc_unchecked(&vcc->stats->tx_err);
37531 dev_kfree_skb_any(skb);
37532 return -EIO;
37533 }
37534- atomic_inc(&vcc->stats->tx);
37535+ atomic_inc_unchecked(&vcc->stats->tx);
37536
37537 return 0;
37538 }
37539@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37540 printk
37541 ("nicstar%d: Can't allocate buffers for aal0.\n",
37542 card->index);
37543- atomic_add(i, &vcc->stats->rx_drop);
37544+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37545 break;
37546 }
37547 if (!atm_charge(vcc, sb->truesize)) {
37548 RXPRINTK
37549 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37550 card->index);
37551- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37552+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37553 dev_kfree_skb_any(sb);
37554 break;
37555 }
37556@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37557 ATM_SKB(sb)->vcc = vcc;
37558 __net_timestamp(sb);
37559 vcc->push(vcc, sb);
37560- atomic_inc(&vcc->stats->rx);
37561+ atomic_inc_unchecked(&vcc->stats->rx);
37562 cell += ATM_CELL_PAYLOAD;
37563 }
37564
37565@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37566 if (iovb == NULL) {
37567 printk("nicstar%d: Out of iovec buffers.\n",
37568 card->index);
37569- atomic_inc(&vcc->stats->rx_drop);
37570+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37571 recycle_rx_buf(card, skb);
37572 return;
37573 }
37574@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37575 small or large buffer itself. */
37576 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37577 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37578- atomic_inc(&vcc->stats->rx_err);
37579+ atomic_inc_unchecked(&vcc->stats->rx_err);
37580 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37581 NS_MAX_IOVECS);
37582 NS_PRV_IOVCNT(iovb) = 0;
37583@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37584 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37585 card->index);
37586 which_list(card, skb);
37587- atomic_inc(&vcc->stats->rx_err);
37588+ atomic_inc_unchecked(&vcc->stats->rx_err);
37589 recycle_rx_buf(card, skb);
37590 vc->rx_iov = NULL;
37591 recycle_iov_buf(card, iovb);
37592@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37593 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37594 card->index);
37595 which_list(card, skb);
37596- atomic_inc(&vcc->stats->rx_err);
37597+ atomic_inc_unchecked(&vcc->stats->rx_err);
37598 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37599 NS_PRV_IOVCNT(iovb));
37600 vc->rx_iov = NULL;
37601@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37602 printk(" - PDU size mismatch.\n");
37603 else
37604 printk(".\n");
37605- atomic_inc(&vcc->stats->rx_err);
37606+ atomic_inc_unchecked(&vcc->stats->rx_err);
37607 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37608 NS_PRV_IOVCNT(iovb));
37609 vc->rx_iov = NULL;
37610@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37611 /* skb points to a small buffer */
37612 if (!atm_charge(vcc, skb->truesize)) {
37613 push_rxbufs(card, skb);
37614- atomic_inc(&vcc->stats->rx_drop);
37615+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37616 } else {
37617 skb_put(skb, len);
37618 dequeue_sm_buf(card, skb);
37619@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37620 ATM_SKB(skb)->vcc = vcc;
37621 __net_timestamp(skb);
37622 vcc->push(vcc, skb);
37623- atomic_inc(&vcc->stats->rx);
37624+ atomic_inc_unchecked(&vcc->stats->rx);
37625 }
37626 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37627 struct sk_buff *sb;
37628@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37629 if (len <= NS_SMBUFSIZE) {
37630 if (!atm_charge(vcc, sb->truesize)) {
37631 push_rxbufs(card, sb);
37632- atomic_inc(&vcc->stats->rx_drop);
37633+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37634 } else {
37635 skb_put(sb, len);
37636 dequeue_sm_buf(card, sb);
37637@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37638 ATM_SKB(sb)->vcc = vcc;
37639 __net_timestamp(sb);
37640 vcc->push(vcc, sb);
37641- atomic_inc(&vcc->stats->rx);
37642+ atomic_inc_unchecked(&vcc->stats->rx);
37643 }
37644
37645 push_rxbufs(card, skb);
37646@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37647
37648 if (!atm_charge(vcc, skb->truesize)) {
37649 push_rxbufs(card, skb);
37650- atomic_inc(&vcc->stats->rx_drop);
37651+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37652 } else {
37653 dequeue_lg_buf(card, skb);
37654 #ifdef NS_USE_DESTRUCTORS
37655@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37656 ATM_SKB(skb)->vcc = vcc;
37657 __net_timestamp(skb);
37658 vcc->push(vcc, skb);
37659- atomic_inc(&vcc->stats->rx);
37660+ atomic_inc_unchecked(&vcc->stats->rx);
37661 }
37662
37663 push_rxbufs(card, sb);
37664@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37665 printk
37666 ("nicstar%d: Out of huge buffers.\n",
37667 card->index);
37668- atomic_inc(&vcc->stats->rx_drop);
37669+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37670 recycle_iovec_rx_bufs(card,
37671 (struct iovec *)
37672 iovb->data,
37673@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37674 card->hbpool.count++;
37675 } else
37676 dev_kfree_skb_any(hb);
37677- atomic_inc(&vcc->stats->rx_drop);
37678+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37679 } else {
37680 /* Copy the small buffer to the huge buffer */
37681 sb = (struct sk_buff *)iov->iov_base;
37682@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37683 #endif /* NS_USE_DESTRUCTORS */
37684 __net_timestamp(hb);
37685 vcc->push(vcc, hb);
37686- atomic_inc(&vcc->stats->rx);
37687+ atomic_inc_unchecked(&vcc->stats->rx);
37688 }
37689 }
37690
37691diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37692index 74e18b0..f16afa0 100644
37693--- a/drivers/atm/solos-pci.c
37694+++ b/drivers/atm/solos-pci.c
37695@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37696 }
37697 atm_charge(vcc, skb->truesize);
37698 vcc->push(vcc, skb);
37699- atomic_inc(&vcc->stats->rx);
37700+ atomic_inc_unchecked(&vcc->stats->rx);
37701 break;
37702
37703 case PKT_STATUS:
37704@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37705 vcc = SKB_CB(oldskb)->vcc;
37706
37707 if (vcc) {
37708- atomic_inc(&vcc->stats->tx);
37709+ atomic_inc_unchecked(&vcc->stats->tx);
37710 solos_pop(vcc, oldskb);
37711 } else {
37712 dev_kfree_skb_irq(oldskb);
37713diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37714index 0215934..ce9f5b1 100644
37715--- a/drivers/atm/suni.c
37716+++ b/drivers/atm/suni.c
37717@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37718
37719
37720 #define ADD_LIMITED(s,v) \
37721- atomic_add((v),&stats->s); \
37722- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37723+ atomic_add_unchecked((v),&stats->s); \
37724+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37725
37726
37727 static void suni_hz(unsigned long from_timer)
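
The ADD_LIMITED macro rewritten above implements saturating SONET counters: add the hardware delta, and if the signed total has wrapped negative, pin it at INT_MAX rather than report a nonsense value. The same logic as a plain function (a hypothetical stand-alone helper, not part of the patch; the kernel version relies on its build flags for the wrap test, while this sketch does the add in unsigned arithmetic to stay within ISO C):

#include <limits.h>
#include <stdio.h>

/* Saturating add: hypothetical stand-alone version of ADD_LIMITED. */
static void add_limited(int *ctr, int v)
{
	/* unsigned add sidesteps signed-overflow UB in plain ISO C */
	int sum = (int)((unsigned int)*ctr + (unsigned int)v);

	*ctr = (sum < 0) ? INT_MAX : sum;       /* wrapped? saturate. */
}

int main(void)
{
	int section_bip = INT_MAX - 2;

	add_limited(&section_bip, 100);         /* would wrap negative */
	printf("%d\n", section_bip);            /* prints INT_MAX */
	return 0;
}
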
37728diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37729index 5120a96..e2572bd 100644
37730--- a/drivers/atm/uPD98402.c
37731+++ b/drivers/atm/uPD98402.c
37732@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37733 struct sonet_stats tmp;
37734 int error = 0;
37735
37736- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37737+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37738 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37739 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37740 if (zero && !error) {
37741@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37742
37743
37744 #define ADD_LIMITED(s,v) \
37745- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37746- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37747- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37748+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37749+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37750+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37751
37752
37753 static void stat_event(struct atm_dev *dev)
37754@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37755 if (reason & uPD98402_INT_PFM) stat_event(dev);
37756 if (reason & uPD98402_INT_PCO) {
37757 (void) GET(PCOCR); /* clear interrupt cause */
37758- atomic_add(GET(HECCT),
37759+ atomic_add_unchecked(GET(HECCT),
37760 &PRIV(dev)->sonet_stats.uncorr_hcs);
37761 }
37762 if ((reason & uPD98402_INT_RFO) &&
37763@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37764 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37765 uPD98402_INT_LOS),PIMR); /* enable them */
37766 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37767- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37768- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37769- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37770+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37771+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37772+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37773 return 0;
37774 }
37775
37776diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37777index cecfb94..87009ec 100644
37778--- a/drivers/atm/zatm.c
37779+++ b/drivers/atm/zatm.c
37780@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37781 }
37782 if (!size) {
37783 dev_kfree_skb_irq(skb);
37784- if (vcc) atomic_inc(&vcc->stats->rx_err);
37785+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37786 continue;
37787 }
37788 if (!atm_charge(vcc,skb->truesize)) {
37789@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37790 skb->len = size;
37791 ATM_SKB(skb)->vcc = vcc;
37792 vcc->push(vcc,skb);
37793- atomic_inc(&vcc->stats->rx);
37794+ atomic_inc_unchecked(&vcc->stats->rx);
37795 }
37796 zout(pos & 0xffff,MTA(mbx));
37797 #if 0 /* probably a stupid idea */
37798@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37799 skb_queue_head(&zatm_vcc->backlog,skb);
37800 break;
37801 }
37802- atomic_inc(&vcc->stats->tx);
37803+ atomic_inc_unchecked(&vcc->stats->tx);
37804 wake_up(&zatm_vcc->tx_wait);
37805 }
37806
37807diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37808index 79bc203..fa3945b 100644
37809--- a/drivers/base/bus.c
37810+++ b/drivers/base/bus.c
37811@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37812 return -EINVAL;
37813
37814 mutex_lock(&subsys->p->mutex);
37815- list_add_tail(&sif->node, &subsys->p->interfaces);
37816+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37817 if (sif->add_dev) {
37818 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37819 while ((dev = subsys_dev_iter_next(&iter)))
37820@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37821 subsys = sif->subsys;
37822
37823 mutex_lock(&subsys->p->mutex);
37824- list_del_init(&sif->node);
37825+ pax_list_del_init((struct list_head *)&sif->node);
37826 if (sif->remove_dev) {
37827 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37828 while ((dev = subsys_dev_iter_next(&iter)))
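
pax_list_add_tail()/pax_list_del_init() above are the write-rarely counterparts of the plain list helpers: the subsys_interface node lives in a structure that grsecurity's constification machinery maps read-only after init, so list updates must be bracketed by a window that temporarily re-enables kernel writes. A hedged sketch of the shape of such a wrapper; the open/close bodies are stand-ins (on x86 the real ones toggle the CR0.WP write-protect bit):

struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel(void)  { /* stand-in: allow rodata writes  */ }
static void pax_close_kernel(void) { /* stand-in: re-protect the pages */ }

/* Same linkage steps as list_add_tail(), inside an open/close window. */
static void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
	pax_close_kernel();
}
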
37829diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37830index 25798db..15f130e 100644
37831--- a/drivers/base/devtmpfs.c
37832+++ b/drivers/base/devtmpfs.c
37833@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37834 if (!thread)
37835 return 0;
37836
37837- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37838+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37839 if (err)
37840 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37841 else
37842@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37843 *err = sys_unshare(CLONE_NEWNS);
37844 if (*err)
37845 goto out;
37846- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37847+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37848 if (*err)
37849 goto out;
37850- sys_chdir("/.."); /* will traverse into overmounted root */
37851- sys_chroot(".");
37852+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37853+ sys_chroot((char __force_user *)".");
37854 complete(&setup_done);
37855 while (1) {
37856 spin_lock(&req_lock);
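
The devtmpfs hunks add __force_user casts because grsecurity separates kernel and user address spaces more strictly than mainline: sys_mount(), sys_chdir() and sys_chroot() are declared to take __user pointers, and handing them kernel string literals would otherwise trip sparse's address-space checking. A sketch of the annotations involved, assuming a __force_user macro along these lines (the stub syscall stands in for the real one):

/* sparse address-space annotations; real builds compile them away */
#ifdef __CHECKER__
# define __user       __attribute__((noderef, address_space(1)))
# define __force_user __attribute__((force)) __user
#else
# define __user
# define __force_user
#endif

static long sys_chroot(const char __user *filename)
{
	(void)filename;               /* stub standing in for the syscall */
	return 0;
}

static void mount_devtmpfs_root(void)
{
	/* The literal lives in kernel space; the cast records that the
	 * address-space mismatch is intentional at this call site. */
	sys_chroot((const char __force_user *)".");
}
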
37857diff --git a/drivers/base/node.c b/drivers/base/node.c
37858index 36fabe43..8cfc112 100644
37859--- a/drivers/base/node.c
37860+++ b/drivers/base/node.c
37861@@ -615,7 +615,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37862 struct node_attr {
37863 struct device_attribute attr;
37864 enum node_states state;
37865-};
37866+} __do_const;
37867
37868 static ssize_t show_node_state(struct device *dev,
37869 struct device_attribute *attr, char *buf)
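
__do_const comes from PaX's constify gcc plugin: structures tagged this way (typically ops and attribute tables full of function pointers, like node_attr here) are forced const by the plugin and placed in read-only memory, shrinking the kernel's writable function-pointer attack surface. A hedged sketch of how such an annotation is typically wired up; the attribute only means something when the plugin is loaded:

#ifdef CONSTIFY_PLUGIN
# define __do_const __attribute__((do_const))   /* plugin-defined */
#else
# define __do_const                             /* no-op otherwise */
#endif

/* Illustrative stand-in for node_attr: a function pointer plus state.
 * With the plugin, every instance is treated as const and ends up in
 * .rodata, so an attacker cannot redirect ->show at run time. */
struct node_attr_like {
	long (*show)(char *buf);
	int state;
} __do_const;
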
37870diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37871index 45937f8..b9a342e 100644
37872--- a/drivers/base/power/domain.c
37873+++ b/drivers/base/power/domain.c
37874@@ -1698,7 +1698,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37875 {
37876 struct cpuidle_driver *cpuidle_drv;
37877 struct gpd_cpuidle_data *cpuidle_data;
37878- struct cpuidle_state *idle_state;
37879+ cpuidle_state_no_const *idle_state;
37880 int ret = 0;
37881
37882 if (IS_ERR_OR_NULL(genpd) || state < 0)
37883@@ -1766,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37884 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37885 {
37886 struct gpd_cpuidle_data *cpuidle_data;
37887- struct cpuidle_state *idle_state;
37888+ cpuidle_state_no_const *idle_state;
37889 int ret = 0;
37890
37891 if (IS_ERR_OR_NULL(genpd))
37892@@ -2195,7 +2195,10 @@ int genpd_dev_pm_attach(struct device *dev)
37893 return ret;
37894 }
37895
37896- dev->pm_domain->detach = genpd_dev_pm_detach;
37897+ pax_open_kernel();
37898+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37899+ pax_close_kernel();
37900+
37901 pm_genpd_poweron(pd);
37902
37903 return 0;
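
The domain.c hunk above shows the companion pattern to constification: dev->pm_domain is read-only at run time under this model, so the one legitimate assignment of ->detach is rewritten as a write through *(void **)& inside a pax_open_kernel()/pax_close_kernel() window. The generic shape, again with stand-in open/close bodies:

#include <stdbool.h>

struct dev_pm_domain_like {
	void (*detach)(void *dev, bool power_off);
};

static void pax_open_kernel(void)  { /* stand-in: lift write protection */ }
static void pax_close_kernel(void) { /* stand-in: restore it */ }

static void set_detach(struct dev_pm_domain_like *pd,
		       void (*fn)(void *, bool))
{
	pax_open_kernel();
	*(void **)&pd->detach = fn;   /* bypasses the const view of pd */
	pax_close_kernel();
}
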
37904diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37905index d2be3f9..0a3167a 100644
37906--- a/drivers/base/power/sysfs.c
37907+++ b/drivers/base/power/sysfs.c
37908@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37909 return -EIO;
37910 }
37911 }
37912- return sprintf(buf, p);
37913+ return sprintf(buf, "%s", p);
37914 }
37915
37916 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
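
The sysfs.c change is a classic format-string hardening fix: sprintf(buf, p) treats any '%' inside p as a conversion specification, while sprintf(buf, "%s", p) copies it literally. Here p comes from a fixed table of status strings, so the change is defensive, but the pattern is worth avoiding everywhere. A small demonstration:

#include <stdio.h>

int main(void)
{
	char buf[64];
	const char *p = "active (100%)";   /* imagine this held a '%' */

	/* sprintf(buf, p) would parse "%)" as a conversion spec and read
	 * a nonexistent variadic argument: undefined behaviour. */
	sprintf(buf, "%s", p);             /* '%' copied literally */
	puts(buf);
	return 0;
}
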
37917diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37918index aab7158..b172db2 100644
37919--- a/drivers/base/power/wakeup.c
37920+++ b/drivers/base/power/wakeup.c
37921@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37922 * They need to be modified together atomically, so it's better to use one
37923 * atomic variable to hold them both.
37924 */
37925-static atomic_t combined_event_count = ATOMIC_INIT(0);
37926+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37927
37928 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37929 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37930
37931 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37932 {
37933- unsigned int comb = atomic_read(&combined_event_count);
37934+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37935
37936 *cnt = (comb >> IN_PROGRESS_BITS);
37937 *inpr = comb & MAX_IN_PROGRESS;
37938@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37939 ws->start_prevent_time = ws->last_time;
37940
37941 /* Increment the counter of events in progress. */
37942- cec = atomic_inc_return(&combined_event_count);
37943+ cec = atomic_inc_return_unchecked(&combined_event_count);
37944
37945 trace_wakeup_source_activate(ws->name, cec);
37946 }
37947@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37948 * Increment the counter of registered wakeup events and decrement the
37949 * couter of wakeup events in progress simultaneously.
37950 */
37951- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37952+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37953 trace_wakeup_source_deactivate(ws->name, cec);
37954
37955 split_counters(&cnt, &inpr);
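
combined_event_count, switched to atomic_unchecked_t above, packs two counters into one word precisely so it can wrap freely: the low IN_PROGRESS_BITS bits count wakeup events in progress, the high bits count registered events, and a single atomic add of MAX_IN_PROGRESS performs "registered++, in-progress--" in one step. A worked user-space example of the layout (split_counters is adapted to take the combined value as a parameter):

#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)            /* 16 bits */
#define MAX_IN_PROGRESS  ((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int comb,
			   unsigned int *cnt, unsigned int *inpr)
{
	*cnt  = comb >> IN_PROGRESS_BITS;   /* registered events  */
	*inpr = comb & MAX_IN_PROGRESS;     /* events in progress */
}

int main(void)
{
	unsigned int comb = 0, cnt, inpr;

	comb += 1;                 /* activate: in-progress++           */
	comb += MAX_IN_PROGRESS;   /* deactivate: cnt++, in-progress--, */
				   /* carried out by one single add     */
	split_counters(comb, &cnt, &inpr);
	printf("cnt=%u inpr=%u\n", cnt, inpr);   /* cnt=1 inpr=0 */
	return 0;
}
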
37956diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37957index 8d98a32..61d3165 100644
37958--- a/drivers/base/syscore.c
37959+++ b/drivers/base/syscore.c
37960@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37961 void register_syscore_ops(struct syscore_ops *ops)
37962 {
37963 mutex_lock(&syscore_ops_lock);
37964- list_add_tail(&ops->node, &syscore_ops_list);
37965+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37966 mutex_unlock(&syscore_ops_lock);
37967 }
37968 EXPORT_SYMBOL_GPL(register_syscore_ops);
37969@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37970 void unregister_syscore_ops(struct syscore_ops *ops)
37971 {
37972 mutex_lock(&syscore_ops_lock);
37973- list_del(&ops->node);
37974+ pax_list_del((struct list_head *)&ops->node);
37975 mutex_unlock(&syscore_ops_lock);
37976 }
37977 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37978diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37979index ff20f19..018f1da 100644
37980--- a/drivers/block/cciss.c
37981+++ b/drivers/block/cciss.c
37982@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37983 while (!list_empty(&h->reqQ)) {
37984 c = list_entry(h->reqQ.next, CommandList_struct, list);
37985 /* can't do anything if fifo is full */
37986- if ((h->access.fifo_full(h))) {
37987+ if ((h->access->fifo_full(h))) {
37988 dev_warn(&h->pdev->dev, "fifo full\n");
37989 break;
37990 }
37991@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37992 h->Qdepth--;
37993
37994 /* Tell the controller execute command */
37995- h->access.submit_command(h, c);
37996+ h->access->submit_command(h, c);
37997
37998 /* Put job onto the completed Q */
37999 addQ(&h->cmpQ, c);
38000@@ -3444,17 +3444,17 @@ startio:
38001
38002 static inline unsigned long get_next_completion(ctlr_info_t *h)
38003 {
38004- return h->access.command_completed(h);
38005+ return h->access->command_completed(h);
38006 }
38007
38008 static inline int interrupt_pending(ctlr_info_t *h)
38009 {
38010- return h->access.intr_pending(h);
38011+ return h->access->intr_pending(h);
38012 }
38013
38014 static inline long interrupt_not_for_us(ctlr_info_t *h)
38015 {
38016- return ((h->access.intr_pending(h) == 0) ||
38017+ return ((h->access->intr_pending(h) == 0) ||
38018 (h->interrupts_enabled == 0));
38019 }
38020
38021@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
38022 u32 a;
38023
38024 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38025- return h->access.command_completed(h);
38026+ return h->access->command_completed(h);
38027
38028 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38029 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38030@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38031 trans_support & CFGTBL_Trans_use_short_tags);
38032
38033 /* Change the access methods to the performant access methods */
38034- h->access = SA5_performant_access;
38035+ h->access = &SA5_performant_access;
38036 h->transMethod = CFGTBL_Trans_Performant;
38037
38038 return;
38039@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38040 if (prod_index < 0)
38041 return -ENODEV;
38042 h->product_name = products[prod_index].product_name;
38043- h->access = *(products[prod_index].access);
38044+ h->access = products[prod_index].access;
38045
38046 if (cciss_board_disabled(h)) {
38047 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38048@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
38049 }
38050
38051 /* make sure the board interrupts are off */
38052- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38053+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38054 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38055 if (rc)
38056 goto clean2;
38057@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
38058 * fake ones to scoop up any residual completions.
38059 */
38060 spin_lock_irqsave(&h->lock, flags);
38061- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38062+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38063 spin_unlock_irqrestore(&h->lock, flags);
38064 free_irq(h->intr[h->intr_mode], h);
38065 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38066@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
38067 dev_info(&h->pdev->dev, "Board READY.\n");
38068 dev_info(&h->pdev->dev,
38069 "Waiting for stale completions to drain.\n");
38070- h->access.set_intr_mask(h, CCISS_INTR_ON);
38071+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38072 msleep(10000);
38073- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38074+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38075
38076 rc = controller_reset_failed(h->cfgtable);
38077 if (rc)
38078@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
38079 cciss_scsi_setup(h);
38080
38081 /* Turn the interrupts on so we can service requests */
38082- h->access.set_intr_mask(h, CCISS_INTR_ON);
38083+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38084
38085 /* Get the firmware version */
38086 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38087@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38088 kfree(flush_buf);
38089 if (return_code != IO_OK)
38090 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38091- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38092+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38093 free_irq(h->intr[h->intr_mode], h);
38094 }
38095
38096diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38097index 7fda30e..2f27946 100644
38098--- a/drivers/block/cciss.h
38099+++ b/drivers/block/cciss.h
38100@@ -101,7 +101,7 @@ struct ctlr_info
38101 /* information about each logical volume */
38102 drive_info_struct *drv[CISS_MAX_LUN];
38103
38104- struct access_method access;
38105+ struct access_method *access;
38106
38107 /* queue and queue Info */
38108 struct list_head reqQ;
38109@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38110 }
38111
38112 static struct access_method SA5_access = {
38113- SA5_submit_command,
38114- SA5_intr_mask,
38115- SA5_fifo_full,
38116- SA5_intr_pending,
38117- SA5_completed,
38118+ .submit_command = SA5_submit_command,
38119+ .set_intr_mask = SA5_intr_mask,
38120+ .fifo_full = SA5_fifo_full,
38121+ .intr_pending = SA5_intr_pending,
38122+ .command_completed = SA5_completed,
38123 };
38124
38125 static struct access_method SA5B_access = {
38126- SA5_submit_command,
38127- SA5B_intr_mask,
38128- SA5_fifo_full,
38129- SA5B_intr_pending,
38130- SA5_completed,
38131+ .submit_command = SA5_submit_command,
38132+ .set_intr_mask = SA5B_intr_mask,
38133+ .fifo_full = SA5_fifo_full,
38134+ .intr_pending = SA5B_intr_pending,
38135+ .command_completed = SA5_completed,
38136 };
38137
38138 static struct access_method SA5_performant_access = {
38139- SA5_submit_command,
38140- SA5_performant_intr_mask,
38141- SA5_fifo_full,
38142- SA5_performant_intr_pending,
38143- SA5_performant_completed,
38144+ .submit_command = SA5_submit_command,
38145+ .set_intr_mask = SA5_performant_intr_mask,
38146+ .fifo_full = SA5_fifo_full,
38147+ .intr_pending = SA5_performant_intr_pending,
38148+ .command_completed = SA5_performant_completed,
38149 };
38150
38151 struct board_type {
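
Two related changes run through the cciss (and, below, cpqarray) hunks: struct ctlr_info embeds a pointer to its access_method table instead of a copy, so every controller shares one table that can live in read-only memory, and the tables themselves switch from positional to designated initializers, so reordering the struct's members can never silently rebind a handler. The initializer point in miniature (illustrative struct, not the driver's real one):

struct access_method_like {
	void (*submit_command)(void *h, void *c);
	void (*set_intr_mask)(void *h, unsigned long val);
};

static void submit(void *h, void *c)            { (void)h; (void)c; }
static void intr_mask(void *h, unsigned long v) { (void)h; (void)v; }

/* Positional form breaks silently if the members above are reordered;
 * designated form stays correct and documents itself. */
static const struct access_method_like sa5_like_access = {
	.submit_command = submit,
	.set_intr_mask  = intr_mask,
};
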
38152diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38153index 2b94403..fd6ad1f 100644
38154--- a/drivers/block/cpqarray.c
38155+++ b/drivers/block/cpqarray.c
38156@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38157 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38158 goto Enomem4;
38159 }
38160- hba[i]->access.set_intr_mask(hba[i], 0);
38161+ hba[i]->access->set_intr_mask(hba[i], 0);
38162 if (request_irq(hba[i]->intr, do_ida_intr,
38163 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38164 {
38165@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38166 add_timer(&hba[i]->timer);
38167
38168 /* Enable IRQ now that spinlock and rate limit timer are set up */
38169- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38170+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38171
38172 for(j=0; j<NWD; j++) {
38173 struct gendisk *disk = ida_gendisk[i][j];
38174@@ -694,7 +694,7 @@ DBGINFO(
38175 for(i=0; i<NR_PRODUCTS; i++) {
38176 if (board_id == products[i].board_id) {
38177 c->product_name = products[i].product_name;
38178- c->access = *(products[i].access);
38179+ c->access = products[i].access;
38180 break;
38181 }
38182 }
38183@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38184 hba[ctlr]->intr = intr;
38185 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38186 hba[ctlr]->product_name = products[j].product_name;
38187- hba[ctlr]->access = *(products[j].access);
38188+ hba[ctlr]->access = products[j].access;
38189 hba[ctlr]->ctlr = ctlr;
38190 hba[ctlr]->board_id = board_id;
38191 hba[ctlr]->pci_dev = NULL; /* not PCI */
38192@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38193
38194 while((c = h->reqQ) != NULL) {
38195 /* Can't do anything if we're busy */
38196- if (h->access.fifo_full(h) == 0)
38197+ if (h->access->fifo_full(h) == 0)
38198 return;
38199
38200 /* Get the first entry from the request Q */
38201@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38202 h->Qdepth--;
38203
38204 /* Tell the controller to do our bidding */
38205- h->access.submit_command(h, c);
38206+ h->access->submit_command(h, c);
38207
38208 /* Get onto the completion Q */
38209 addQ(&h->cmpQ, c);
38210@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38211 unsigned long flags;
38212 __u32 a,a1;
38213
38214- istat = h->access.intr_pending(h);
38215+ istat = h->access->intr_pending(h);
38216 /* Is this interrupt for us? */
38217 if (istat == 0)
38218 return IRQ_NONE;
38219@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38220 */
38221 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38222 if (istat & FIFO_NOT_EMPTY) {
38223- while((a = h->access.command_completed(h))) {
38224+ while((a = h->access->command_completed(h))) {
38225 a1 = a; a &= ~3;
38226 if ((c = h->cmpQ) == NULL)
38227 {
38228@@ -1448,11 +1448,11 @@ static int sendcmd(
38229 /*
38230 * Disable interrupt
38231 */
38232- info_p->access.set_intr_mask(info_p, 0);
38233+ info_p->access->set_intr_mask(info_p, 0);
38234 /* Make sure there is room in the command FIFO */
38235 /* Actually it should be completely empty at this time. */
38236 for (i = 200000; i > 0; i--) {
38237- temp = info_p->access.fifo_full(info_p);
38238+ temp = info_p->access->fifo_full(info_p);
38239 if (temp != 0) {
38240 break;
38241 }
38242@@ -1465,7 +1465,7 @@ DBG(
38243 /*
38244 * Send the cmd
38245 */
38246- info_p->access.submit_command(info_p, c);
38247+ info_p->access->submit_command(info_p, c);
38248 complete = pollcomplete(ctlr);
38249
38250 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38251@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38252 * we check the new geometry. Then turn interrupts back on when
38253 * we're done.
38254 */
38255- host->access.set_intr_mask(host, 0);
38256+ host->access->set_intr_mask(host, 0);
38257 getgeometry(ctlr);
38258- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38259+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38260
38261 for(i=0; i<NWD; i++) {
38262 struct gendisk *disk = ida_gendisk[ctlr][i];
38263@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38264 /* Wait (up to 2 seconds) for a command to complete */
38265
38266 for (i = 200000; i > 0; i--) {
38267- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38268+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38269 if (done == 0) {
38270 udelay(10); /* a short fixed delay */
38271 } else
38272diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38273index be73e9d..7fbf140 100644
38274--- a/drivers/block/cpqarray.h
38275+++ b/drivers/block/cpqarray.h
38276@@ -99,7 +99,7 @@ struct ctlr_info {
38277 drv_info_t drv[NWD];
38278 struct proc_dir_entry *proc;
38279
38280- struct access_method access;
38281+ struct access_method *access;
38282
38283 cmdlist_t *reqQ;
38284 cmdlist_t *cmpQ;
38285diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38286index 434c77d..6d3219a 100644
38287--- a/drivers/block/drbd/drbd_bitmap.c
38288+++ b/drivers/block/drbd/drbd_bitmap.c
38289@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38290 submit_bio(rw, bio);
38291 /* this should not count as user activity and cause the
38292 * resync to throttle -- see drbd_rs_should_slow_down(). */
38293- atomic_add(len >> 9, &device->rs_sect_ev);
38294+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38295 }
38296 }
38297
38298diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38299index b905e98..0812ed8 100644
38300--- a/drivers/block/drbd/drbd_int.h
38301+++ b/drivers/block/drbd/drbd_int.h
38302@@ -385,7 +385,7 @@ struct drbd_epoch {
38303 struct drbd_connection *connection;
38304 struct list_head list;
38305 unsigned int barrier_nr;
38306- atomic_t epoch_size; /* increased on every request added. */
38307+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38308 atomic_t active; /* increased on every req. added, and dec on every finished. */
38309 unsigned long flags;
38310 };
38311@@ -946,7 +946,7 @@ struct drbd_device {
38312 unsigned int al_tr_number;
38313 int al_tr_cycle;
38314 wait_queue_head_t seq_wait;
38315- atomic_t packet_seq;
38316+ atomic_unchecked_t packet_seq;
38317 unsigned int peer_seq;
38318 spinlock_t peer_seq_lock;
38319 unsigned long comm_bm_set; /* communicated number of set bits. */
38320@@ -955,8 +955,8 @@ struct drbd_device {
38321 struct mutex own_state_mutex;
38322 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38323 char congestion_reason; /* Why we where congested... */
38324- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38325- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38326+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38327+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38328 int rs_last_sect_ev; /* counter to compare with */
38329 int rs_last_events; /* counter of read or write "events" (unit sectors)
38330 * on the lower level device when we last looked. */
38331diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38332index 1fc8342..7e7742b 100644
38333--- a/drivers/block/drbd/drbd_main.c
38334+++ b/drivers/block/drbd/drbd_main.c
38335@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38336 p->sector = sector;
38337 p->block_id = block_id;
38338 p->blksize = blksize;
38339- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38340+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38341 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38342 }
38343
38344@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38345 return -EIO;
38346 p->sector = cpu_to_be64(req->i.sector);
38347 p->block_id = (unsigned long)req;
38348- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38349+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38350 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38351 if (device->state.conn >= C_SYNC_SOURCE &&
38352 device->state.conn <= C_PAUSED_SYNC_T)
38353@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38354 atomic_set(&device->unacked_cnt, 0);
38355 atomic_set(&device->local_cnt, 0);
38356 atomic_set(&device->pp_in_use_by_net, 0);
38357- atomic_set(&device->rs_sect_in, 0);
38358- atomic_set(&device->rs_sect_ev, 0);
38359+ atomic_set_unchecked(&device->rs_sect_in, 0);
38360+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38361 atomic_set(&device->ap_in_flight, 0);
38362 atomic_set(&device->md_io.in_use, 0);
38363
38364@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38365 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38366 struct drbd_resource *resource = connection->resource;
38367
38368- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38369- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38370+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38371+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38372 kfree(connection->current_epoch);
38373
38374 idr_destroy(&connection->peer_devices);
38375diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38376index 74df8cf..e41fc24 100644
38377--- a/drivers/block/drbd/drbd_nl.c
38378+++ b/drivers/block/drbd/drbd_nl.c
38379@@ -3637,13 +3637,13 @@ finish:
38380
38381 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38382 {
38383- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38384+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38385 struct sk_buff *msg;
38386 struct drbd_genlmsghdr *d_out;
38387 unsigned seq;
38388 int err = -ENOMEM;
38389
38390- seq = atomic_inc_return(&drbd_genl_seq);
38391+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38392 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38393 if (!msg)
38394 goto failed;
38395diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38396index cee2035..22f66bd 100644
38397--- a/drivers/block/drbd/drbd_receiver.c
38398+++ b/drivers/block/drbd/drbd_receiver.c
38399@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38400 struct drbd_device *device = peer_device->device;
38401 int err;
38402
38403- atomic_set(&device->packet_seq, 0);
38404+ atomic_set_unchecked(&device->packet_seq, 0);
38405 device->peer_seq = 0;
38406
38407 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38408@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38409 do {
38410 next_epoch = NULL;
38411
38412- epoch_size = atomic_read(&epoch->epoch_size);
38413+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38414
38415 switch (ev & ~EV_CLEANUP) {
38416 case EV_PUT:
38417@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38418 rv = FE_DESTROYED;
38419 } else {
38420 epoch->flags = 0;
38421- atomic_set(&epoch->epoch_size, 0);
38422+ atomic_set_unchecked(&epoch->epoch_size, 0);
38423 /* atomic_set(&epoch->active, 0); is already zero */
38424 if (rv == FE_STILL_LIVE)
38425 rv = FE_RECYCLED;
38426@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38427 conn_wait_active_ee_empty(connection);
38428 drbd_flush(connection);
38429
38430- if (atomic_read(&connection->current_epoch->epoch_size)) {
38431+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38432 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38433 if (epoch)
38434 break;
38435@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38436 }
38437
38438 epoch->flags = 0;
38439- atomic_set(&epoch->epoch_size, 0);
38440+ atomic_set_unchecked(&epoch->epoch_size, 0);
38441 atomic_set(&epoch->active, 0);
38442
38443 spin_lock(&connection->epoch_lock);
38444- if (atomic_read(&connection->current_epoch->epoch_size)) {
38445+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38446 list_add(&epoch->list, &connection->current_epoch->list);
38447 connection->current_epoch = epoch;
38448 connection->epochs++;
38449@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38450 list_add_tail(&peer_req->w.list, &device->sync_ee);
38451 spin_unlock_irq(&device->resource->req_lock);
38452
38453- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38454+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38455 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38456 return 0;
38457
38458@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38459 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38460 }
38461
38462- atomic_add(pi->size >> 9, &device->rs_sect_in);
38463+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38464
38465 return err;
38466 }
38467@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38468
38469 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38470 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38471- atomic_inc(&connection->current_epoch->epoch_size);
38472+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38473 err2 = drbd_drain_block(peer_device, pi->size);
38474 if (!err)
38475 err = err2;
38476@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38477
38478 spin_lock(&connection->epoch_lock);
38479 peer_req->epoch = connection->current_epoch;
38480- atomic_inc(&peer_req->epoch->epoch_size);
38481+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38482 atomic_inc(&peer_req->epoch->active);
38483 spin_unlock(&connection->epoch_lock);
38484
38485@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38486
38487 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38488 (int)part_stat_read(&disk->part0, sectors[1]) -
38489- atomic_read(&device->rs_sect_ev);
38490+ atomic_read_unchecked(&device->rs_sect_ev);
38491
38492 if (atomic_read(&device->ap_actlog_cnt)
38493 || curr_events - device->rs_last_events > 64) {
38494@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38495 device->use_csums = true;
38496 } else if (pi->cmd == P_OV_REPLY) {
38497 /* track progress, we may need to throttle */
38498- atomic_add(size >> 9, &device->rs_sect_in);
38499+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38500 peer_req->w.cb = w_e_end_ov_reply;
38501 dec_rs_pending(device);
38502 /* drbd_rs_begin_io done when we sent this request,
38503@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38504 goto out_free_e;
38505
38506 submit_for_resync:
38507- atomic_add(size >> 9, &device->rs_sect_ev);
38508+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38509
38510 submit:
38511 update_receiver_timing_details(connection, drbd_submit_peer_request);
38512@@ -4564,7 +4564,7 @@ struct data_cmd {
38513 int expect_payload;
38514 size_t pkt_size;
38515 int (*fn)(struct drbd_connection *, struct packet_info *);
38516-};
38517+} __do_const;
38518
38519 static struct data_cmd drbd_cmd_handler[] = {
38520 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38521@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38522 if (!list_empty(&connection->current_epoch->list))
38523 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38524 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38525- atomic_set(&connection->current_epoch->epoch_size, 0);
38526+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38527 connection->send.seen_any_write_yet = false;
38528
38529 drbd_info(connection, "Connection closed\n");
38530@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38531 put_ldev(device);
38532 }
38533 dec_rs_pending(device);
38534- atomic_add(blksize >> 9, &device->rs_sect_in);
38535+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38536
38537 return 0;
38538 }
38539@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38540 struct asender_cmd {
38541 size_t pkt_size;
38542 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38543-};
38544+} __do_const;
38545
38546 static struct asender_cmd asender_tbl[] = {
38547 [P_PING] = { 0, got_Ping },
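The __do_const annotations on data_cmd and asender_cmd matter because both structs are pure dispatch tables: an array of function pointers is a prime hijack target, so the constify plugin forces such structs into read-only memory. Plain C const expresses the same intent, as in this sketch (all names invented for the demo):

#include <stddef.h>
#include <stdio.h>

struct packet_info;

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	int (*fn)(struct packet_info *);
};

static int receive_ping(struct packet_info *pi)
{
	(void)pi;
	return 0;
}

/* const lands the table in .rodata, so fn cannot be redirected at runtime */
static const struct data_cmd cmd_handler[] = {
	[0] = { 0, 16, receive_ping },
};

int main(void)
{
	printf("handler expects %zu-byte packets\n", cmd_handler[0].pkt_size);
	return cmd_handler[0].fn(NULL);
}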
38548diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38549index d0fae55..4469096 100644
38550--- a/drivers/block/drbd/drbd_worker.c
38551+++ b/drivers/block/drbd/drbd_worker.c
38552@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38553 list_add_tail(&peer_req->w.list, &device->read_ee);
38554 spin_unlock_irq(&device->resource->req_lock);
38555
38556- atomic_add(size >> 9, &device->rs_sect_ev);
38557+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38558 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38559 return 0;
38560
38561@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38562 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38563 int number, mxb;
38564
38565- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38566+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38567 device->rs_in_flight -= sect_in;
38568
38569 rcu_read_lock();
38570@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38571 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38572 struct fifo_buffer *plan;
38573
38574- atomic_set(&device->rs_sect_in, 0);
38575- atomic_set(&device->rs_sect_ev, 0);
38576+ atomic_set_unchecked(&device->rs_sect_in, 0);
38577+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38578 device->rs_in_flight = 0;
38579 device->rs_last_events =
38580 (int)part_stat_read(&disk->part0, sectors[0]) +
38581diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38582index 773e964..e85af00 100644
38583--- a/drivers/block/loop.c
38584+++ b/drivers/block/loop.c
38585@@ -234,7 +234,7 @@ static int __do_lo_send_write(struct file *file,
38586
38587 file_start_write(file);
38588 set_fs(get_ds());
38589- bw = file->f_op->write(file, buf, len, &pos);
38590+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38591 set_fs(old_fs);
38592 file_end_write(file);
38593 if (likely(bw == len))
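The loop.c hunk only changes an annotation: __do_lo_send_write() runs under set_fs(get_ds()), so passing a kernel buffer to ->write(), whose parameter is declared __user, is deliberate, and __force_user (grsecurity's combined __force __user marker) records that for sparse's address-space checker. A userspace model, assuming sparse-style annotations that compile away under plain gcc:

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

/* stand-in for file->f_op->write(): expects a user-space buffer */
static long fake_write(const char __user *buf, size_t len)
{
	char copy[64];
	size_t n = len < sizeof(copy) - 1 ? len : sizeof(copy) - 1;

	memcpy(copy, (const char __force *)buf, n);  /* ok: limit was raised */
	copy[n] = '\0';
	printf("wrote: %s\n", copy);
	return (long)n;
}

int main(void)
{
	const char *kernel_buf = "page data";   /* kernel-space in this model */
	return fake_write((const char __force __user *)kernel_buf,
			  strlen(kernel_buf)) > 0 ? 0 : 1;
}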
38594diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38595index 09e628da..7607aaa 100644
38596--- a/drivers/block/pktcdvd.c
38597+++ b/drivers/block/pktcdvd.c
38598@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38599
38600 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38601 {
38602- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38603+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38604 }
38605
38606 /*
38607@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38608 return -EROFS;
38609 }
38610 pd->settings.fp = ti.fp;
38611- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38612+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38613
38614 if (ti.nwa_v) {
38615 pd->nwa = be32_to_cpu(ti.next_writable);
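Both pktcdvd changes replace the literal 1 with 1UL so the subtraction happens at unsigned long width before the sector_t cast, instead of wrapping in 32 bits first. A worked demo of the difference, assuming a 64-bit unsigned long and an invented size of 0 to force the wrap:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
	uint32_t size = 0;   /* degenerate settings.size, chosen to show the wrap */

	sector_t mask32 = ~(sector_t)(size - 1);    /* subtraction wraps in 32 bits */
	sector_t mask64 = ~(sector_t)(size - 1UL);  /* subtraction done at full width */

	printf("mask with  1  : 0x%016llx\n", (unsigned long long)mask32);
	printf("mask with 1UL : 0x%016llx\n", (unsigned long long)mask64);
	return 0;
}

For any nonzero power-of-two size the two forms agree; the widening only diverges in the degenerate case and, presumably, keeps the size_overflow plugin's view of the arithmetic clean.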
38616diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38617index b67066d..515b7f4 100644
38618--- a/drivers/block/rbd.c
38619+++ b/drivers/block/rbd.c
38620@@ -64,7 +64,7 @@
38621 * If the counter is already at its maximum value returns
38622 * -EINVAL without updating it.
38623 */
38624-static int atomic_inc_return_safe(atomic_t *v)
38625+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38626 {
38627 unsigned int counter;
38628
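__intentional_overflow(-1) is the size_overflow plugin's escape hatch: rbd's atomic_inc_return_safe() already handles wraparound by hand, so the attribute tells the plugin not to instrument it. The saturating pattern it implements, modelled with C11 atomics (the kernel version uses atomic_t and returns -EINVAL):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static int inc_return_safe(atomic_int *v)
{
	unsigned int counter = (unsigned int)atomic_fetch_add(v, 1) + 1u;

	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;
	atomic_fetch_sub(v, 1);      /* undo: the value stays pinned at INT_MAX */
	return -1;                   /* the kernel returns -EINVAL */
}

int main(void)
{
	atomic_int v = INT_MAX;
	int ret = inc_return_safe(&v);

	printf("inc at INT_MAX -> %d, value now %d\n", ret, atomic_load(&v));
	return 0;
}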
38629diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38630index e5565fb..71be10b4 100644
38631--- a/drivers/block/smart1,2.h
38632+++ b/drivers/block/smart1,2.h
38633@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38634 }
38635
38636 static struct access_method smart4_access = {
38637- smart4_submit_command,
38638- smart4_intr_mask,
38639- smart4_fifo_full,
38640- smart4_intr_pending,
38641- smart4_completed,
38642+ .submit_command = smart4_submit_command,
38643+ .set_intr_mask = smart4_intr_mask,
38644+ .fifo_full = smart4_fifo_full,
38645+ .intr_pending = smart4_intr_pending,
38646+ .command_completed = smart4_completed,
38647 };
38648
38649 /*
38650@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38651 }
38652
38653 static struct access_method smart2_access = {
38654- smart2_submit_command,
38655- smart2_intr_mask,
38656- smart2_fifo_full,
38657- smart2_intr_pending,
38658- smart2_completed,
38659+ .submit_command = smart2_submit_command,
38660+ .set_intr_mask = smart2_intr_mask,
38661+ .fifo_full = smart2_fifo_full,
38662+ .intr_pending = smart2_intr_pending,
38663+ .command_completed = smart2_completed,
38664 };
38665
38666 /*
38667@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38668 }
38669
38670 static struct access_method smart2e_access = {
38671- smart2e_submit_command,
38672- smart2e_intr_mask,
38673- smart2e_fifo_full,
38674- smart2e_intr_pending,
38675- smart2e_completed,
38676+ .submit_command = smart2e_submit_command,
38677+ .set_intr_mask = smart2e_intr_mask,
38678+ .fifo_full = smart2e_fifo_full,
38679+ .intr_pending = smart2e_intr_pending,
38680+ .command_completed = smart2e_completed,
38681 };
38682
38683 /*
38684@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38685 }
38686
38687 static struct access_method smart1_access = {
38688- smart1_submit_command,
38689- smart1_intr_mask,
38690- smart1_fifo_full,
38691- smart1_intr_pending,
38692- smart1_completed,
38693+ .submit_command = smart1_submit_command,
38694+ .set_intr_mask = smart1_intr_mask,
38695+ .fifo_full = smart1_fifo_full,
38696+ .intr_pending = smart1_intr_pending,
38697+ .command_completed = smart1_completed,
38698 };
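All four smart1/2/4 access_method tables move from positional to C99 designated initializers. The generated data is identical; the designated form keeps working if struct access_method is ever reordered and makes each function-pointer binding auditable at a glance. Reduced to a two-member sketch with invented names:

#include <stdio.h>

struct access_method {
	void (*submit_command)(void);
	void (*set_intr_mask)(void);
};

static void submit(void)    { puts("submit"); }
static void intr_mask(void) { puts("mask"); }

/* positional: correctness silently depends on member order */
static struct access_method legacy = { submit, intr_mask };

/* designated: each pointer is bound to a named member */
static struct access_method modern = {
	.submit_command = submit,
	.set_intr_mask  = intr_mask,
};

int main(void)
{
	legacy.submit_command();
	modern.set_intr_mask();
	return 0;
}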
38699diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38700index 55c135b..9f8d60c 100644
38701--- a/drivers/bluetooth/btwilink.c
38702+++ b/drivers/bluetooth/btwilink.c
38703@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38704
38705 static int bt_ti_probe(struct platform_device *pdev)
38706 {
38707- static struct ti_st *hst;
38708+ struct ti_st *hst;
38709 struct hci_dev *hdev;
38710 int err;
38711
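The btwilink fix deletes a stray static: bt_ti_probe() allocates fresh per-device state on every call, so a function-scope static pointer is at best wasted BSS and at worst a shared slot that a second probe would clobber. A toy showing why static locals and per-call state don't mix:

#include <stdio.h>

struct ti_st { int id; };

static struct ti_st *probe(int id)
{
	static struct ti_st dev;   /* one shared instance across all probes */

	dev.id = id;
	return &dev;
}

int main(void)
{
	struct ti_st *a = probe(1);
	struct ti_st *b = probe(2);

	/* both pointers alias the same object; device 1's state is gone */
	printf("a->id=%d b->id=%d same=%d\n", a->id, b->id, a == b);
	return 0;
}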
38712diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38713index 5d28a45..a538f90 100644
38714--- a/drivers/cdrom/cdrom.c
38715+++ b/drivers/cdrom/cdrom.c
38716@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38717 ENSURE(reset, CDC_RESET);
38718 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38719 cdi->mc_flags = 0;
38720- cdo->n_minors = 0;
38721 cdi->options = CDO_USE_FFLAGS;
38722
38723 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38724@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38725 else
38726 cdi->cdda_method = CDDA_OLD;
38727
38728- if (!cdo->generic_packet)
38729- cdo->generic_packet = cdrom_dummy_generic_packet;
38730+ if (!cdo->generic_packet) {
38731+ pax_open_kernel();
38732+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38733+ pax_close_kernel();
38734+ }
38735
38736 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38737 mutex_lock(&cdrom_mutex);
38738@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38739 if (cdi->exit)
38740 cdi->exit(cdi);
38741
38742- cdi->ops->n_minors--;
38743 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38744 }
38745
38746@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38747 */
38748 nr = nframes;
38749 do {
38750- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38751+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38752 if (cgc.buffer)
38753 break;
38754
38755@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38756 struct cdrom_device_info *cdi;
38757 int ret;
38758
38759- ret = scnprintf(info + *pos, max_size - *pos, header);
38760+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38761 if (!ret)
38762 return 1;
38763
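Besides the pax_open_kernel() write to the const ops struct and the kmalloc -> kzalloc hardening, the cdrom hunk fixes a textbook format-string bug: cdrom_print_info() passed caller-supplied text as the format argument. The rule it enforces, in userspace form:

#include <stdio.h>

int main(void)
{
	const char *header = "drive speed: 100%\n";  /* stray '%' in the data */
	char buf[64];

	/* snprintf(buf, sizeof(buf), header);     BAD: '%' parsed as a conversion */
	snprintf(buf, sizeof(buf), "%s", header);   /* GOOD: header is plain data */
	fputs(buf, stdout);
	return 0;
}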
38764diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38765index 584bc31..e64a12c 100644
38766--- a/drivers/cdrom/gdrom.c
38767+++ b/drivers/cdrom/gdrom.c
38768@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38769 .audio_ioctl = gdrom_audio_ioctl,
38770 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38771 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38772- .n_minors = 1,
38773 };
38774
38775 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38776diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38777index a4af822..ed58cd1 100644
38778--- a/drivers/char/Kconfig
38779+++ b/drivers/char/Kconfig
38780@@ -17,7 +17,8 @@ config DEVMEM
38781
38782 config DEVKMEM
38783 bool "/dev/kmem virtual device support"
38784- default y
38785+ default n
38786+ depends on !GRKERNSEC_KMEM
38787 help
38788 Say Y here if you want to support the /dev/kmem device. The
38789 /dev/kmem device is rarely used, but can be used for certain
38790@@ -586,6 +587,7 @@ config DEVPORT
38791 bool
38792 depends on !M68K
38793 depends on ISA || PCI
38794+ depends on !GRKERNSEC_KMEM
38795 default y
38796
38797 source "drivers/s390/char/Kconfig"
38798diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38799index a48e05b..6bac831 100644
38800--- a/drivers/char/agp/compat_ioctl.c
38801+++ b/drivers/char/agp/compat_ioctl.c
38802@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38803 return -ENOMEM;
38804 }
38805
38806- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38807+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38808 sizeof(*usegment) * ureserve.seg_count)) {
38809 kfree(usegment);
38810 kfree(ksegment);
38811diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38812index 09f17eb..8531d2f 100644
38813--- a/drivers/char/agp/frontend.c
38814+++ b/drivers/char/agp/frontend.c
38815@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38816 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38817 return -EFAULT;
38818
38819- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38820+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38821 return -EFAULT;
38822
38823 client = agp_find_client_by_pid(reserve.pid);
38824@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38825 if (segment == NULL)
38826 return -ENOMEM;
38827
38828- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38829+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38830 sizeof(struct agp_segment) * reserve.seg_count)) {
38831 kfree(segment);
38832 return -EFAULT;
38833diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38834index 4f94375..413694e 100644
38835--- a/drivers/char/genrtc.c
38836+++ b/drivers/char/genrtc.c
38837@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38838 switch (cmd) {
38839
38840 case RTC_PLL_GET:
38841+ memset(&pll, 0, sizeof(pll));
38842 if (get_rtc_pll(&pll))
38843 return -EINVAL;
38844 else
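The one-line genrtc change is an infoleak fix: struct rtc_pll_info is later copied to user space in full, and nothing guarantees get_rtc_pll() writes every byte, padding included. Zeroing first means any unwritten byte leaks zeros rather than stale kernel stack. The same shape in userspace (struct layout invented):

#include <stdio.h>
#include <string.h>

struct pll_info { char flag; /* typically 3 padding bytes here */ int value; };

static void fill_pll(struct pll_info *p)
{
	p->flag = 1;     /* the padding after flag is never written */
	p->value = 42;
}

int main(void)
{
	struct pll_info p;

	memset(&p, 0, sizeof(p));   /* without this, padding holds stack junk */
	fill_pll(&p);

	const unsigned char *raw = (const unsigned char *)&p;
	for (size_t i = 0; i < sizeof(p); i++)
		printf("%02x ", raw[i]);   /* every byte is now well defined */
	putchar('\n');
	return 0;
}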
38845diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38846index 5c0baa9..44011b1 100644
38847--- a/drivers/char/hpet.c
38848+++ b/drivers/char/hpet.c
38849@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38850 }
38851
38852 static int
38853-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38854+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38855 struct hpet_info *info)
38856 {
38857 struct hpet_timer __iomem *timer;
38858diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
38859index 24cc4ed..f9807cf 100644
38860--- a/drivers/char/i8k.c
38861+++ b/drivers/char/i8k.c
38862@@ -788,7 +788,7 @@ static const struct i8k_config_data i8k_config_data[] = {
38863 },
38864 };
38865
38866-static struct dmi_system_id i8k_dmi_table[] __initdata = {
38867+static const struct dmi_system_id i8k_dmi_table[] __initconst = {
38868 {
38869 .ident = "Dell Inspiron",
38870 .matches = {
38871diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38872index 9bb5928..57a7801 100644
38873--- a/drivers/char/ipmi/ipmi_msghandler.c
38874+++ b/drivers/char/ipmi/ipmi_msghandler.c
38875@@ -436,7 +436,7 @@ struct ipmi_smi {
38876 struct proc_dir_entry *proc_dir;
38877 char proc_dir_name[10];
38878
38879- atomic_t stats[IPMI_NUM_STATS];
38880+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38881
38882 /*
38883 * run_to_completion duplicate of smb_info, smi_info
38884@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38885 static DEFINE_MUTEX(smi_watchers_mutex);
38886
38887 #define ipmi_inc_stat(intf, stat) \
38888- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38889+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38890 #define ipmi_get_stat(intf, stat) \
38891- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38892+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38893
38894 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38895 "ACPI", "SMBIOS", "PCI",
38896@@ -2828,7 +2828,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38897 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38898 init_waitqueue_head(&intf->waitq);
38899 for (i = 0; i < IPMI_NUM_STATS; i++)
38900- atomic_set(&intf->stats[i], 0);
38901+ atomic_set_unchecked(&intf->stats[i], 0);
38902
38903 intf->proc_dir = NULL;
38904
38905diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38906index 518585c..6c985cef 100644
38907--- a/drivers/char/ipmi/ipmi_si_intf.c
38908+++ b/drivers/char/ipmi/ipmi_si_intf.c
38909@@ -289,7 +289,7 @@ struct smi_info {
38910 unsigned char slave_addr;
38911
38912 /* Counters and things for the proc filesystem. */
38913- atomic_t stats[SI_NUM_STATS];
38914+ atomic_unchecked_t stats[SI_NUM_STATS];
38915
38916 struct task_struct *thread;
38917
38918@@ -298,9 +298,9 @@ struct smi_info {
38919 };
38920
38921 #define smi_inc_stat(smi, stat) \
38922- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38923+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38924 #define smi_get_stat(smi, stat) \
38925- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38926+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38927
38928 #define SI_MAX_PARMS 4
38929
38930@@ -3498,7 +3498,7 @@ static int try_smi_init(struct smi_info *new_smi)
38931 atomic_set(&new_smi->req_events, 0);
38932 new_smi->run_to_completion = false;
38933 for (i = 0; i < SI_NUM_STATS; i++)
38934- atomic_set(&new_smi->stats[i], 0);
38935+ atomic_set_unchecked(&new_smi->stats[i], 0);
38936
38937 new_smi->interrupt_disabled = true;
38938 atomic_set(&new_smi->need_watch, 0);
38939diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38940index 297110c..3f69b43 100644
38941--- a/drivers/char/mem.c
38942+++ b/drivers/char/mem.c
38943@@ -18,6 +18,7 @@
38944 #include <linux/raw.h>
38945 #include <linux/tty.h>
38946 #include <linux/capability.h>
38947+#include <linux/security.h>
38948 #include <linux/ptrace.h>
38949 #include <linux/device.h>
38950 #include <linux/highmem.h>
38951@@ -36,6 +37,10 @@
38952
38953 #define DEVPORT_MINOR 4
38954
38955+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38956+extern const struct file_operations grsec_fops;
38957+#endif
38958+
38959 static inline unsigned long size_inside_page(unsigned long start,
38960 unsigned long size)
38961 {
38962@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38963
38964 while (cursor < to) {
38965 if (!devmem_is_allowed(pfn)) {
38966+#ifdef CONFIG_GRKERNSEC_KMEM
38967+ gr_handle_mem_readwrite(from, to);
38968+#else
38969 printk(KERN_INFO
38970 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38971 current->comm, from, to);
38972+#endif
38973 return 0;
38974 }
38975 cursor += PAGE_SIZE;
38976@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38977 }
38978 return 1;
38979 }
38980+#elif defined(CONFIG_GRKERNSEC_KMEM)
38981+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38982+{
38983+ return 0;
38984+}
38985 #else
38986 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38987 {
38988@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38989 #endif
38990
38991 while (count > 0) {
38992- unsigned long remaining;
38993+ unsigned long remaining = 0;
38994+ char *temp;
38995
38996 sz = size_inside_page(p, count);
38997
38998@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38999 if (!ptr)
39000 return -EFAULT;
39001
39002- remaining = copy_to_user(buf, ptr, sz);
39003+#ifdef CONFIG_PAX_USERCOPY
39004+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39005+ if (!temp) {
39006+ unxlate_dev_mem_ptr(p, ptr);
39007+ return -ENOMEM;
39008+ }
39009+ remaining = probe_kernel_read(temp, ptr, sz);
39010+#else
39011+ temp = ptr;
39012+#endif
39013+
39014+ if (!remaining)
39015+ remaining = copy_to_user(buf, temp, sz);
39016+
39017+#ifdef CONFIG_PAX_USERCOPY
39018+ kfree(temp);
39019+#endif
39020+
39021 unxlate_dev_mem_ptr(p, ptr);
39022 if (remaining)
39023 return -EFAULT;
39024@@ -380,9 +412,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39025 size_t count, loff_t *ppos)
39026 {
39027 unsigned long p = *ppos;
39028- ssize_t low_count, read, sz;
39029+ ssize_t low_count, read, sz, err = 0;
39030 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
39031- int err = 0;
39032
39033 read = 0;
39034 if (p < (unsigned long) high_memory) {
39035@@ -404,6 +435,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39036 }
39037 #endif
39038 while (low_count > 0) {
39039+ char *temp;
39040+
39041 sz = size_inside_page(p, low_count);
39042
39043 /*
39044@@ -413,7 +446,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39045 */
39046 kbuf = xlate_dev_kmem_ptr((void *)p);
39047
39048- if (copy_to_user(buf, kbuf, sz))
39049+#ifdef CONFIG_PAX_USERCOPY
39050+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39051+ if (!temp)
39052+ return -ENOMEM;
39053+ err = probe_kernel_read(temp, kbuf, sz);
39054+#else
39055+ temp = kbuf;
39056+#endif
39057+
39058+ if (!err)
39059+ err = copy_to_user(buf, temp, sz);
39060+
39061+#ifdef CONFIG_PAX_USERCOPY
39062+ kfree(temp);
39063+#endif
39064+
39065+ if (err)
39066 return -EFAULT;
39067 buf += sz;
39068 p += sz;
39069@@ -804,6 +853,9 @@ static const struct memdev {
39070 #ifdef CONFIG_PRINTK
39071 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
39072 #endif
39073+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39074+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
39075+#endif
39076 };
39077
39078 static int memory_open(struct inode *inode, struct file *filp)
39079@@ -865,7 +917,7 @@ static int __init chr_dev_init(void)
39080 continue;
39081
39082 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39083- NULL, devlist[minor].name);
39084+ NULL, "%s", devlist[minor].name);
39085 }
39086
39087 return tty_init();
39088diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39089index 9df78e2..01ba9ae 100644
39090--- a/drivers/char/nvram.c
39091+++ b/drivers/char/nvram.c
39092@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39093
39094 spin_unlock_irq(&rtc_lock);
39095
39096- if (copy_to_user(buf, contents, tmp - contents))
39097+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39098 return -EFAULT;
39099
39100 *ppos = i;
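The nvram_read() change adds a belt-and-braces clamp: even if the NVRAM bookkeeping ever let tmp run past the contents buffer, the length test fails closed before copy_to_user() can read out of bounds. A sketch of the guard, with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

static long bounded_copy(char *dst, const char *contents,
			 const char *tmp, size_t bufsize)
{
	size_t len = (size_t)(tmp - contents);

	if (len > bufsize)           /* never trust a derived length */
		return -1;           /* the kernel returns -EFAULT here */
	memcpy(dst, contents, len);  /* stands in for copy_to_user() */
	return (long)len;
}

int main(void)
{
	char contents[16] = "nvram bytes";
	char out[16];
	long n = bounded_copy(out, contents,
			      contents + strlen(contents), sizeof(contents));

	printf("copied %ld bytes\n", n);
	return 0;
}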
39101diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39102index 0ea9986..e7b07e4 100644
39103--- a/drivers/char/pcmcia/synclink_cs.c
39104+++ b/drivers/char/pcmcia/synclink_cs.c
39105@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39106
39107 if (debug_level >= DEBUG_LEVEL_INFO)
39108 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39109- __FILE__, __LINE__, info->device_name, port->count);
39110+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39111
39112 if (tty_port_close_start(port, tty, filp) == 0)
39113 goto cleanup;
39114@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39115 cleanup:
39116 if (debug_level >= DEBUG_LEVEL_INFO)
39117 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39118- tty->driver->name, port->count);
39119+ tty->driver->name, atomic_read(&port->count));
39120 }
39121
39122 /* Wait until the transmitter is empty.
39123@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39124
39125 if (debug_level >= DEBUG_LEVEL_INFO)
39126 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39127- __FILE__, __LINE__, tty->driver->name, port->count);
39128+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39129
39130 /* If port is closing, signal caller to try again */
39131 if (port->flags & ASYNC_CLOSING){
39132@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39133 goto cleanup;
39134 }
39135 spin_lock(&port->lock);
39136- port->count++;
39137+ atomic_inc(&port->count);
39138 spin_unlock(&port->lock);
39139 spin_unlock_irqrestore(&info->netlock, flags);
39140
39141- if (port->count == 1) {
39142+ if (atomic_read(&port->count) == 1) {
39143 /* 1st open on this device, init hardware */
39144 retval = startup(info, tty);
39145 if (retval < 0)
39146@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39147 unsigned short new_crctype;
39148
39149 /* return error if TTY interface open */
39150- if (info->port.count)
39151+ if (atomic_read(&info->port.count))
39152 return -EBUSY;
39153
39154 switch (encoding)
39155@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39156
39157 /* arbitrate between network and tty opens */
39158 spin_lock_irqsave(&info->netlock, flags);
39159- if (info->port.count != 0 || info->netcount != 0) {
39160+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39161 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39162 spin_unlock_irqrestore(&info->netlock, flags);
39163 return -EBUSY;
39164@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39165 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39166
39167 /* return error if TTY interface open */
39168- if (info->port.count)
39169+ if (atomic_read(&info->port.count))
39170 return -EBUSY;
39171
39172 if (cmd != SIOCWANDEV)
39173diff --git a/drivers/char/random.c b/drivers/char/random.c
39174index 9cd6968..6416f00 100644
39175--- a/drivers/char/random.c
39176+++ b/drivers/char/random.c
39177@@ -289,9 +289,6 @@
39178 /*
39179 * To allow fractional bits to be tracked, the entropy_count field is
39180 * denominated in units of 1/8th bits.
39181- *
39182- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39183- * credit_entropy_bits() needs to be 64 bits wide.
39184 */
39185 #define ENTROPY_SHIFT 3
39186 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39187@@ -439,9 +436,9 @@ struct entropy_store {
39188 };
39189
39190 static void push_to_pool(struct work_struct *work);
39191-static __u32 input_pool_data[INPUT_POOL_WORDS];
39192-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39193-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39194+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39195+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39196+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39197
39198 static struct entropy_store input_pool = {
39199 .poolinfo = &poolinfo_table[0],
39200@@ -635,7 +632,7 @@ retry:
39201 /* The +2 corresponds to the /4 in the denominator */
39202
39203 do {
39204- unsigned int anfrac = min(pnfrac, pool_size/2);
39205+ u64 anfrac = min(pnfrac, pool_size/2);
39206 unsigned int add =
39207 ((pool_size - entropy_count)*anfrac*3) >> s;
39208
39209@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39210
39211 extract_buf(r, tmp);
39212 i = min_t(int, nbytes, EXTRACT_SIZE);
39213- if (copy_to_user(buf, tmp, i)) {
39214+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39215 ret = -EFAULT;
39216 break;
39217 }
39218@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39219 static int proc_do_uuid(struct ctl_table *table, int write,
39220 void __user *buffer, size_t *lenp, loff_t *ppos)
39221 {
39222- struct ctl_table fake_table;
39223+ ctl_table_no_const fake_table;
39224 unsigned char buf[64], tmp_uuid[16], *uuid;
39225
39226 uuid = table->data;
39227@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39228 static int proc_do_entropy(struct ctl_table *table, int write,
39229 void __user *buffer, size_t *lenp, loff_t *ppos)
39230 {
39231- struct ctl_table fake_table;
39232+ ctl_table_no_const fake_table;
39233 int entropy_count;
39234
39235 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
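Two independent things happen in the random.c hunk: the pool arrays gain __latent_entropy seeds, and credit_entropy_bits() widens anfrac to u64 so the (pool_size - entropy_count) * anfrac * 3 product cannot wrap, which is why the old 31-bit-constraint comment could go. The overflow it forecloses, using an invented 64 Kbit pool (the stock 4096-bit input pool still fits in 32 bits):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pool_size = (1u << 16) << 3;   /* 64 Kbit pool in 1/8-bit units */
	uint32_t entropy_count = 0;
	uint32_t anfrac = pool_size / 2;        /* min(pnfrac, pool_size/2) */

	uint32_t narrow = (pool_size - entropy_count) * anfrac * 3;           /* wraps to 0 */
	uint64_t wide   = (pool_size - entropy_count) * (uint64_t)anfrac * 3; /* exact */

	printf("32-bit product: %u\n", narrow);
	printf("64-bit product: %llu\n", (unsigned long long)wide);
	return 0;
}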
39236diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39237index e496dae..3db53b6 100644
39238--- a/drivers/char/sonypi.c
39239+++ b/drivers/char/sonypi.c
39240@@ -54,6 +54,7 @@
39241
39242 #include <asm/uaccess.h>
39243 #include <asm/io.h>
39244+#include <asm/local.h>
39245
39246 #include <linux/sonypi.h>
39247
39248@@ -490,7 +491,7 @@ static struct sonypi_device {
39249 spinlock_t fifo_lock;
39250 wait_queue_head_t fifo_proc_list;
39251 struct fasync_struct *fifo_async;
39252- int open_count;
39253+ local_t open_count;
39254 int model;
39255 struct input_dev *input_jog_dev;
39256 struct input_dev *input_key_dev;
39257@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39258 static int sonypi_misc_release(struct inode *inode, struct file *file)
39259 {
39260 mutex_lock(&sonypi_device.lock);
39261- sonypi_device.open_count--;
39262+ local_dec(&sonypi_device.open_count);
39263 mutex_unlock(&sonypi_device.lock);
39264 return 0;
39265 }
39266@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39267 {
39268 mutex_lock(&sonypi_device.lock);
39269 /* Flush input queue on first open */
39270- if (!sonypi_device.open_count)
39271+ if (!local_read(&sonypi_device.open_count))
39272 kfifo_reset(&sonypi_device.fifo);
39273- sonypi_device.open_count++;
39274+ local_inc(&sonypi_device.open_count);
39275 mutex_unlock(&sonypi_device.lock);
39276
39277 return 0;
39278@@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
39279
39280 static struct platform_device *sonypi_platform_device;
39281
39282-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
39283+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
39284 {
39285 .ident = "Sony Vaio",
39286 .matches = {
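The DMI-table retagging here and in i8k.c pairs const with __initconst instead of __initdata: a const object must be placed in the read-only init section, and gcc reports a section type conflict when const and non-const objects are forced into the same one. Reduced illustration with shortened section names (the kernel macros expand to similar attributes):

#include <stdio.h>

#define my_initdata  __attribute__((section(".init.data")))
#define my_initconst __attribute__((section(".init.rodata")))

struct dmi_id { const char *ident; };

/* writable init data: legal, but the table stays pointlessly patchable */
static struct dmi_id table_rw[] my_initdata = { { "Sony Vaio" } };

/* read-only init data: matches the const qualifier */
static const struct dmi_id table_ro[] my_initconst = { { "Dell Inspiron" } };

int main(void)
{
	printf("%s / %s\n", table_rw[0].ident, table_ro[0].ident);
	return 0;
}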
39287diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39288index 565a947..dcdc06e 100644
39289--- a/drivers/char/tpm/tpm_acpi.c
39290+++ b/drivers/char/tpm/tpm_acpi.c
39291@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39292 virt = acpi_os_map_iomem(start, len);
39293 if (!virt) {
39294 kfree(log->bios_event_log);
39295+ log->bios_event_log = NULL;
39296 printk("%s: ERROR - Unable to map memory\n", __func__);
39297 return -EIO;
39298 }
39299
39300- memcpy_fromio(log->bios_event_log, virt, len);
39301+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39302
39303 acpi_os_unmap_iomem(virt, len);
39304 return 0;
39305diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39306index 3a56a13..f8cbd25 100644
39307--- a/drivers/char/tpm/tpm_eventlog.c
39308+++ b/drivers/char/tpm/tpm_eventlog.c
39309@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39310 event = addr;
39311
39312 if ((event->event_type == 0 && event->event_size == 0) ||
39313- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39314+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39315 return NULL;
39316
39317 return addr;
39318@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39319 return NULL;
39320
39321 if ((event->event_type == 0 && event->event_size == 0) ||
39322- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39323+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39324 return NULL;
39325
39326 (*pos)++;
39327@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39328 int i;
39329
39330 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39331- seq_putc(m, data[i]);
39332+ if (!seq_putc(m, data[i]))
39333+ return -EFAULT;
39334
39335 return 0;
39336 }
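The two tpm_eventlog checks are rewritten from "base + header + size >= limit" to "size >= limit - base - header". With event_size attacker-controlled, the additive form can wrap around and accept an event that extends far past the mapped log; the subtractive form only combines values already known to be in range. Pointer arithmetic is modelled with uintptr_t below, since the kernel relies on flat wraparound that C proper does not promise for pointers:

#include <stdint.h>
#include <stdio.h>

#define HEADER ((uintptr_t)32)   /* sizeof(struct tcpa_event) stand-in */

int main(void)
{
	uintptr_t base  = UINTPTR_MAX - 0xFFF;   /* event near the top of memory */
	uintptr_t limit = base + 0x800;          /* end of the mapped log */
	uintptr_t size  = UINTPTR_MAX - 0x100;   /* hostile event_size */

	/* old form: the sum wraps, so the ">= limit" rejection never fires */
	int old_rejects = (base + HEADER + size >= limit);

	/* new form: all operands are known-small, so no wrap is possible */
	int new_rejects = (size >= limit - base - HEADER);

	printf("old check rejects hostile event: %d\n", old_rejects); /* 0 */
	printf("new check rejects hostile event: %d\n", new_rejects); /* 1 */
	return 0;
}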
39337diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39338index 72d7028..1586601 100644
39339--- a/drivers/char/virtio_console.c
39340+++ b/drivers/char/virtio_console.c
39341@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39342 if (to_user) {
39343 ssize_t ret;
39344
39345- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39346+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39347 if (ret)
39348 return -EFAULT;
39349 } else {
39350@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39351 if (!port_has_data(port) && !port->host_connected)
39352 return 0;
39353
39354- return fill_readbuf(port, ubuf, count, true);
39355+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39356 }
39357
39358 static int wait_port_writable(struct port *port, bool nonblock)
39359diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39360index 956b7e5..b655045 100644
39361--- a/drivers/clk/clk-composite.c
39362+++ b/drivers/clk/clk-composite.c
39363@@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39364 struct clk *clk;
39365 struct clk_init_data init;
39366 struct clk_composite *composite;
39367- struct clk_ops *clk_composite_ops;
39368+ clk_ops_no_const *clk_composite_ops;
39369
39370 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39371 if (!composite) {
39372diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39373index dd3a78c..386d49c 100644
39374--- a/drivers/clk/socfpga/clk-gate.c
39375+++ b/drivers/clk/socfpga/clk-gate.c
39376@@ -22,6 +22,7 @@
39377 #include <linux/mfd/syscon.h>
39378 #include <linux/of.h>
39379 #include <linux/regmap.h>
39380+#include <asm/pgtable.h>
39381
39382 #include "clk.h"
39383
39384@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39385 return 0;
39386 }
39387
39388-static struct clk_ops gateclk_ops = {
39389+static clk_ops_no_const gateclk_ops __read_only = {
39390 .prepare = socfpga_clk_prepare,
39391 .recalc_rate = socfpga_clk_recalc_rate,
39392 .get_parent = socfpga_clk_get_parent,
39393@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39394 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39395 socfpga_clk->hw.bit_idx = clk_gate[1];
39396
39397- gateclk_ops.enable = clk_gate_ops.enable;
39398- gateclk_ops.disable = clk_gate_ops.disable;
39399+ pax_open_kernel();
39400+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39401+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39402+ pax_close_kernel();
39403 }
39404
39405 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39406diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39407index de6da95..c98278b 100644
39408--- a/drivers/clk/socfpga/clk-pll.c
39409+++ b/drivers/clk/socfpga/clk-pll.c
39410@@ -21,6 +21,7 @@
39411 #include <linux/io.h>
39412 #include <linux/of.h>
39413 #include <linux/of_address.h>
39414+#include <asm/pgtable.h>
39415
39416 #include "clk.h"
39417
39418@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39419 CLK_MGR_PLL_CLK_SRC_MASK;
39420 }
39421
39422-static struct clk_ops clk_pll_ops = {
39423+static clk_ops_no_const clk_pll_ops __read_only = {
39424 .recalc_rate = clk_pll_recalc_rate,
39425 .get_parent = clk_pll_get_parent,
39426 };
39427@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39428 pll_clk->hw.hw.init = &init;
39429
39430 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39431- clk_pll_ops.enable = clk_gate_ops.enable;
39432- clk_pll_ops.disable = clk_gate_ops.disable;
39433+ pax_open_kernel();
39434+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39435+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39436+ pax_close_kernel();
39437
39438 clk = clk_register(NULL, &pll_clk->hw.hw);
39439 if (WARN_ON(IS_ERR(clk))) {
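Both socfpga hunks show the standard constification retrofit: the ops table becomes __read_only, and the init-time fixup of .enable/.disable goes through pax_open_kernel()/pax_close_kernel(), which briefly permit the sanctioned write. A userspace sketch of the same discipline using mprotect() (the kernel primitives toggle the CPU's write-protect state rather than page permissions):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct clk_ops { int (*enable)(void); };

static int real_enable(void) { return 0; }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	/* page-aligned stand-in for an ops structure living in __read_only data */
	struct clk_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;

	mprotect(ops, pg, PROT_READ);               /* steady state: read-only */

	mprotect(ops, pg, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
	ops->enable = real_enable;                  /* the one sanctioned write */
	mprotect(ops, pg, PROT_READ);               /* pax_close_kernel() */

	printf("enable() -> %d\n", ops->enable());
	return 0;
}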
39440diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39441index b0c18ed..1713a80 100644
39442--- a/drivers/cpufreq/acpi-cpufreq.c
39443+++ b/drivers/cpufreq/acpi-cpufreq.c
39444@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39445 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39446 per_cpu(acfreq_data, cpu) = data;
39447
39448- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39449- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39450+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39451+ pax_open_kernel();
39452+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39453+ pax_close_kernel();
39454+ }
39455
39456 result = acpi_processor_register_performance(data->acpi_data, cpu);
39457 if (result)
39458@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39459 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39460 break;
39461 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39462- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39463+ pax_open_kernel();
39464+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39465+ pax_close_kernel();
39466 break;
39467 default:
39468 break;
39469@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39470 if (!msrs)
39471 return;
39472
39473- acpi_cpufreq_driver.boost_supported = true;
39474- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39475+ pax_open_kernel();
39476+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39477+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39478+ pax_close_kernel();
39479
39480 cpu_notifier_register_begin();
39481
39482diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39483index bab67db..91af7e3 100644
39484--- a/drivers/cpufreq/cpufreq-dt.c
39485+++ b/drivers/cpufreq/cpufreq-dt.c
39486@@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39487 if (!IS_ERR(cpu_reg))
39488 regulator_put(cpu_reg);
39489
39490- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39491+ pax_open_kernel();
39492+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39493+ pax_close_kernel();
39494
39495 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39496 if (ret)
39497diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39498index 8ae655c..3141442 100644
39499--- a/drivers/cpufreq/cpufreq.c
39500+++ b/drivers/cpufreq/cpufreq.c
39501@@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39502 }
39503
39504 mutex_lock(&cpufreq_governor_mutex);
39505- list_del(&governor->governor_list);
39506+ pax_list_del(&governor->governor_list);
39507 mutex_unlock(&cpufreq_governor_mutex);
39508 return;
39509 }
39510@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39511 return NOTIFY_OK;
39512 }
39513
39514-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39515+static struct notifier_block cpufreq_cpu_notifier = {
39516 .notifier_call = cpufreq_cpu_callback,
39517 };
39518
39519@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
39520 return 0;
39521
39522 write_lock_irqsave(&cpufreq_driver_lock, flags);
39523- cpufreq_driver->boost_enabled = state;
39524+ pax_open_kernel();
39525+ *(bool *)&cpufreq_driver->boost_enabled = state;
39526+ pax_close_kernel();
39527 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39528
39529 ret = cpufreq_driver->set_boost(state);
39530 if (ret) {
39531 write_lock_irqsave(&cpufreq_driver_lock, flags);
39532- cpufreq_driver->boost_enabled = !state;
39533+ pax_open_kernel();
39534+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39535+ pax_close_kernel();
39536 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39537
39538 pr_err("%s: Cannot %s BOOST\n",
39539@@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39540 cpufreq_driver = driver_data;
39541 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39542
39543- if (driver_data->setpolicy)
39544- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39545+ if (driver_data->setpolicy) {
39546+ pax_open_kernel();
39547+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39548+ pax_close_kernel();
39549+ }
39550
39551 if (cpufreq_boost_supported()) {
39552 /*
39553 * Check if driver provides function to enable boost -
39554 * if not, use cpufreq_boost_set_sw as default
39555 */
39556- if (!cpufreq_driver->set_boost)
39557- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39558+ if (!cpufreq_driver->set_boost) {
39559+ pax_open_kernel();
39560+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39561+ pax_close_kernel();
39562+ }
39563
39564 ret = cpufreq_sysfs_create_file(&boost.attr);
39565 if (ret) {
39566diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39567index 1b44496..b80ff5e 100644
39568--- a/drivers/cpufreq/cpufreq_governor.c
39569+++ b/drivers/cpufreq/cpufreq_governor.c
39570@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39571 struct dbs_data *dbs_data;
39572 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39573 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39574- struct od_ops *od_ops = NULL;
39575+ const struct od_ops *od_ops = NULL;
39576 struct od_dbs_tuners *od_tuners = NULL;
39577 struct cs_dbs_tuners *cs_tuners = NULL;
39578 struct cpu_dbs_common_info *cpu_cdbs;
39579@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39580
39581 if ((cdata->governor == GOV_CONSERVATIVE) &&
39582 (!policy->governor->initialized)) {
39583- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39584+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39585
39586 cpufreq_register_notifier(cs_ops->notifier_block,
39587 CPUFREQ_TRANSITION_NOTIFIER);
39588@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39589
39590 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39591 (policy->governor->initialized == 1)) {
39592- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39593+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39594
39595 cpufreq_unregister_notifier(cs_ops->notifier_block,
39596 CPUFREQ_TRANSITION_NOTIFIER);
39597diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39598index cc401d1..8197340 100644
39599--- a/drivers/cpufreq/cpufreq_governor.h
39600+++ b/drivers/cpufreq/cpufreq_governor.h
39601@@ -212,7 +212,7 @@ struct common_dbs_data {
39602 void (*exit)(struct dbs_data *dbs_data);
39603
39604 /* Governor specific ops, see below */
39605- void *gov_ops;
39606+ const void *gov_ops;
39607 };
39608
39609 /* Governor Per policy data */
39610@@ -232,7 +232,7 @@ struct od_ops {
39611 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39612 unsigned int freq_next, unsigned int relation);
39613 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39614-};
39615+} __no_const;
39616
39617 struct cs_ops {
39618 struct notifier_block *notifier_block;
39619diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39620index ad3f38f..8f086cd 100644
39621--- a/drivers/cpufreq/cpufreq_ondemand.c
39622+++ b/drivers/cpufreq/cpufreq_ondemand.c
39623@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39624
39625 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39626
39627-static struct od_ops od_ops = {
39628+static struct od_ops od_ops __read_only = {
39629 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39630 .powersave_bias_target = generic_powersave_bias_target,
39631 .freq_increase = dbs_freq_increase,
39632@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39633 (struct cpufreq_policy *, unsigned int, unsigned int),
39634 unsigned int powersave_bias)
39635 {
39636- od_ops.powersave_bias_target = f;
39637+ pax_open_kernel();
39638+ *(void **)&od_ops.powersave_bias_target = f;
39639+ pax_close_kernel();
39640 od_set_powersave_bias(powersave_bias);
39641 }
39642 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39643
39644 void od_unregister_powersave_bias_handler(void)
39645 {
39646- od_ops.powersave_bias_target = generic_powersave_bias_target;
39647+ pax_open_kernel();
39648+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39649+ pax_close_kernel();
39650 od_set_powersave_bias(0);
39651 }
39652 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39653diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39654index 872c577..5fb3c20 100644
39655--- a/drivers/cpufreq/intel_pstate.c
39656+++ b/drivers/cpufreq/intel_pstate.c
39657@@ -133,10 +133,10 @@ struct pstate_funcs {
39658 struct cpu_defaults {
39659 struct pstate_adjust_policy pid_policy;
39660 struct pstate_funcs funcs;
39661-};
39662+} __do_const;
39663
39664 static struct pstate_adjust_policy pid_params;
39665-static struct pstate_funcs pstate_funcs;
39666+static struct pstate_funcs *pstate_funcs;
39667 static int hwp_active;
39668
39669 struct perf_limits {
39670@@ -690,18 +690,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39671
39672 cpu->pstate.current_pstate = pstate;
39673
39674- pstate_funcs.set(cpu, pstate);
39675+ pstate_funcs->set(cpu, pstate);
39676 }
39677
39678 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39679 {
39680- cpu->pstate.min_pstate = pstate_funcs.get_min();
39681- cpu->pstate.max_pstate = pstate_funcs.get_max();
39682- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39683- cpu->pstate.scaling = pstate_funcs.get_scaling();
39684+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39685+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39686+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39687+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39688
39689- if (pstate_funcs.get_vid)
39690- pstate_funcs.get_vid(cpu);
39691+ if (pstate_funcs->get_vid)
39692+ pstate_funcs->get_vid(cpu);
39693 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39694 }
39695
39696@@ -1030,9 +1030,9 @@ static int intel_pstate_msrs_not_valid(void)
39697 rdmsrl(MSR_IA32_APERF, aperf);
39698 rdmsrl(MSR_IA32_MPERF, mperf);
39699
39700- if (!pstate_funcs.get_max() ||
39701- !pstate_funcs.get_min() ||
39702- !pstate_funcs.get_turbo())
39703+ if (!pstate_funcs->get_max() ||
39704+ !pstate_funcs->get_min() ||
39705+ !pstate_funcs->get_turbo())
39706 return -ENODEV;
39707
39708 rdmsrl(MSR_IA32_APERF, tmp);
39709@@ -1046,7 +1046,7 @@ static int intel_pstate_msrs_not_valid(void)
39710 return 0;
39711 }
39712
39713-static void copy_pid_params(struct pstate_adjust_policy *policy)
39714+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39715 {
39716 pid_params.sample_rate_ms = policy->sample_rate_ms;
39717 pid_params.p_gain_pct = policy->p_gain_pct;
39718@@ -1058,12 +1058,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39719
39720 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39721 {
39722- pstate_funcs.get_max = funcs->get_max;
39723- pstate_funcs.get_min = funcs->get_min;
39724- pstate_funcs.get_turbo = funcs->get_turbo;
39725- pstate_funcs.get_scaling = funcs->get_scaling;
39726- pstate_funcs.set = funcs->set;
39727- pstate_funcs.get_vid = funcs->get_vid;
39728+ pstate_funcs = funcs;
39729 }
39730
39731 #if IS_ENABLED(CONFIG_ACPI)
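The intel_pstate change replaces a writable, memberwise-copied pstate_funcs global with a single pointer to the CPU model's table, so no mutable blob of function pointers survives init. The shape of the change, with a one-member struct standing in:

#include <stdio.h>

struct pstate_funcs { int (*get_max)(void); };

static int core_get_max(void) { return 39; }

static const struct pstate_funcs core_funcs = { .get_max = core_get_max };

/* before: static struct pstate_funcs pstate_funcs; plus field-by-field copy */
static const struct pstate_funcs *pstate_funcs;  /* after: one pointer */

static void copy_cpu_funcs(const struct pstate_funcs *funcs)
{
	pstate_funcs = funcs;   /* replaces six member assignments */
}

int main(void)
{
	copy_cpu_funcs(&core_funcs);
	printf("max pstate: %d\n", pstate_funcs->get_max());
	return 0;
}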
39732diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39733index 529cfd9..0e28fff 100644
39734--- a/drivers/cpufreq/p4-clockmod.c
39735+++ b/drivers/cpufreq/p4-clockmod.c
39736@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39737 case 0x0F: /* Core Duo */
39738 case 0x16: /* Celeron Core */
39739 case 0x1C: /* Atom */
39740- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39741+ pax_open_kernel();
39742+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39743+ pax_close_kernel();
39744 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39745 case 0x0D: /* Pentium M (Dothan) */
39746- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39747+ pax_open_kernel();
39748+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39749+ pax_close_kernel();
39750 /* fall through */
39751 case 0x09: /* Pentium M (Banias) */
39752 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39753@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39754
39755 /* on P-4s, the TSC runs with constant frequency independent whether
39756 * throttling is active or not. */
39757- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39758+ pax_open_kernel();
39759+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39760+ pax_close_kernel();
39761
39762 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39763 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39764diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39765index 9bb42ba..b01b4a2 100644
39766--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39767+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39768@@ -18,14 +18,12 @@
39769 #include <asm/head.h>
39770 #include <asm/timer.h>
39771
39772-static struct cpufreq_driver *cpufreq_us3_driver;
39773-
39774 struct us3_freq_percpu_info {
39775 struct cpufreq_frequency_table table[4];
39776 };
39777
39778 /* Indexed by cpu number. */
39779-static struct us3_freq_percpu_info *us3_freq_table;
39780+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39781
39782 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39783 * in the Safari config register.
39784@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39785
39786 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39787 {
39788- if (cpufreq_us3_driver)
39789- us3_freq_target(policy, 0);
39790+ us3_freq_target(policy, 0);
39791
39792 return 0;
39793 }
39794
39795+static int __init us3_freq_init(void);
39796+static void __exit us3_freq_exit(void);
39797+
39798+static struct cpufreq_driver cpufreq_us3_driver = {
39799+ .init = us3_freq_cpu_init,
39800+ .verify = cpufreq_generic_frequency_table_verify,
39801+ .target_index = us3_freq_target,
39802+ .get = us3_freq_get,
39803+ .exit = us3_freq_cpu_exit,
39804+ .name = "UltraSPARC-III",
39805+
39806+};
39807+
39808 static int __init us3_freq_init(void)
39809 {
39810 unsigned long manuf, impl, ver;
39811- int ret;
39812
39813 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39814 return -ENODEV;
39815@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39816 (impl == CHEETAH_IMPL ||
39817 impl == CHEETAH_PLUS_IMPL ||
39818 impl == JAGUAR_IMPL ||
39819- impl == PANTHER_IMPL)) {
39820- struct cpufreq_driver *driver;
39821-
39822- ret = -ENOMEM;
39823- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39824- if (!driver)
39825- goto err_out;
39826-
39827- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39828- GFP_KERNEL);
39829- if (!us3_freq_table)
39830- goto err_out;
39831-
39832- driver->init = us3_freq_cpu_init;
39833- driver->verify = cpufreq_generic_frequency_table_verify;
39834- driver->target_index = us3_freq_target;
39835- driver->get = us3_freq_get;
39836- driver->exit = us3_freq_cpu_exit;
39837- strcpy(driver->name, "UltraSPARC-III");
39838-
39839- cpufreq_us3_driver = driver;
39840- ret = cpufreq_register_driver(driver);
39841- if (ret)
39842- goto err_out;
39843-
39844- return 0;
39845-
39846-err_out:
39847- if (driver) {
39848- kfree(driver);
39849- cpufreq_us3_driver = NULL;
39850- }
39851- kfree(us3_freq_table);
39852- us3_freq_table = NULL;
39853- return ret;
39854- }
39855+ impl == PANTHER_IMPL))
39856+ return cpufreq_register_driver(&cpufreq_us3_driver);
39857
39858 return -ENODEV;
39859 }
39860
39861 static void __exit us3_freq_exit(void)
39862 {
39863- if (cpufreq_us3_driver) {
39864- cpufreq_unregister_driver(cpufreq_us3_driver);
39865- kfree(cpufreq_us3_driver);
39866- cpufreq_us3_driver = NULL;
39867- kfree(us3_freq_table);
39868- us3_freq_table = NULL;
39869- }
39870+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39871 }
39872
39873 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39874diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39875index 7d4a315..21bb886 100644
39876--- a/drivers/cpufreq/speedstep-centrino.c
39877+++ b/drivers/cpufreq/speedstep-centrino.c
39878@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39879 !cpu_has(cpu, X86_FEATURE_EST))
39880 return -ENODEV;
39881
39882- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39883- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39884+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39885+ pax_open_kernel();
39886+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39887+ pax_close_kernel();
39888+ }
39889
39890 if (policy->cpu != 0)
39891 return -ENODEV;
39892diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39893index 2697e87..c32476c 100644
39894--- a/drivers/cpuidle/driver.c
39895+++ b/drivers/cpuidle/driver.c
39896@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39897
39898 static void poll_idle_init(struct cpuidle_driver *drv)
39899 {
39900- struct cpuidle_state *state = &drv->states[0];
39901+ cpuidle_state_no_const *state = &drv->states[0];
39902
39903 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39904 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39905diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39906index fb9f511..213e6cc 100644
39907--- a/drivers/cpuidle/governor.c
39908+++ b/drivers/cpuidle/governor.c
39909@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39910 mutex_lock(&cpuidle_lock);
39911 if (__cpuidle_find_governor(gov->name) == NULL) {
39912 ret = 0;
39913- list_add_tail(&gov->governor_list, &cpuidle_governors);
39914+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39915 if (!cpuidle_curr_governor ||
39916 cpuidle_curr_governor->rating < gov->rating)
39917 cpuidle_switch_governor(gov);
39918diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39919index 832a2c3..1794080 100644
39920--- a/drivers/cpuidle/sysfs.c
39921+++ b/drivers/cpuidle/sysfs.c
39922@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39923 NULL
39924 };
39925
39926-static struct attribute_group cpuidle_attr_group = {
39927+static attribute_group_no_const cpuidle_attr_group = {
39928 .attrs = cpuidle_default_attrs,
39929 .name = "cpuidle",
39930 };
39931diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39932index 8d2a772..33826c9 100644
39933--- a/drivers/crypto/hifn_795x.c
39934+++ b/drivers/crypto/hifn_795x.c
39935@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39936 MODULE_PARM_DESC(hifn_pll_ref,
39937 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39938
39939-static atomic_t hifn_dev_number;
39940+static atomic_unchecked_t hifn_dev_number;
39941
39942 #define ACRYPTO_OP_DECRYPT 0
39943 #define ACRYPTO_OP_ENCRYPT 1
39944@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39945 goto err_out_disable_pci_device;
39946
39947 snprintf(name, sizeof(name), "hifn%d",
39948- atomic_inc_return(&hifn_dev_number)-1);
39949+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39950
39951 err = pci_request_regions(pdev, name);
39952 if (err)
39953diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39954index 30b538d8..1610d75 100644
39955--- a/drivers/devfreq/devfreq.c
39956+++ b/drivers/devfreq/devfreq.c
39957@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39958 goto err_out;
39959 }
39960
39961- list_add(&governor->node, &devfreq_governor_list);
39962+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39963
39964 list_for_each_entry(devfreq, &devfreq_list, node) {
39965 int ret = 0;
39966@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39967 }
39968 }
39969
39970- list_del(&governor->node);
39971+ pax_list_del((struct list_head *)&governor->node);
39972 err_out:
39973 mutex_unlock(&devfreq_list_lock);
39974
39975diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39976index 8ee383d..736b5de 100644
39977--- a/drivers/dma/sh/shdma-base.c
39978+++ b/drivers/dma/sh/shdma-base.c
39979@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39980 schan->slave_id = -EINVAL;
39981 }
39982
39983- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39984- sdev->desc_size, GFP_KERNEL);
39985+ schan->desc = kcalloc(sdev->desc_size,
39986+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39987 if (!schan->desc) {
39988 ret = -ENOMEM;
39989 goto edescalloc;
39990diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39991index 9f1d4c7..fceff78 100644
39992--- a/drivers/dma/sh/shdmac.c
39993+++ b/drivers/dma/sh/shdmac.c
39994@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39995 return ret;
39996 }
39997
39998-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39999+static struct notifier_block sh_dmae_nmi_notifier = {
40000 .notifier_call = sh_dmae_nmi_handler,
40001
40002 /* Run before NMI debug handler and KGDB */
40003diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
40004index 592af5f..bb1d583 100644
40005--- a/drivers/edac/edac_device.c
40006+++ b/drivers/edac/edac_device.c
40007@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
40008 */
40009 int edac_device_alloc_index(void)
40010 {
40011- static atomic_t device_indexes = ATOMIC_INIT(0);
40012+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
40013
40014- return atomic_inc_return(&device_indexes) - 1;
40015+ return atomic_inc_return_unchecked(&device_indexes) - 1;
40016 }
40017 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
40018
40019diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
40020index c84eecb..4d7381d 100644
40021--- a/drivers/edac/edac_mc_sysfs.c
40022+++ b/drivers/edac/edac_mc_sysfs.c
40023@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
40024 struct dev_ch_attribute {
40025 struct device_attribute attr;
40026 int channel;
40027-};
40028+} __do_const;
40029
40030 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
40031 static struct dev_ch_attribute dev_attr_legacy_##_name = \
40032@@ -1009,15 +1009,17 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
40033 }
40034
40035 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
40036+ pax_open_kernel();
40037 if (mci->get_sdram_scrub_rate) {
40038- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40039- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40040+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40041+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40042 }
40043
40044 if (mci->set_sdram_scrub_rate) {
40045- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40046- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40047+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40048+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40049 }
40050+ pax_close_kernel();
40051
40052 err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
40053 if (err) {
40054diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
40055index 2cf44b4d..6dd2dc7 100644
40056--- a/drivers/edac/edac_pci.c
40057+++ b/drivers/edac/edac_pci.c
40058@@ -29,7 +29,7 @@
40059
40060 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40061 static LIST_HEAD(edac_pci_list);
40062-static atomic_t pci_indexes = ATOMIC_INIT(0);
40063+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40064
40065 /*
40066 * edac_pci_alloc_ctl_info
40067@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40068 */
40069 int edac_pci_alloc_index(void)
40070 {
40071- return atomic_inc_return(&pci_indexes) - 1;
40072+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
40073 }
40074 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40075
40076diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40077index 24d877f..4e30133 100644
40078--- a/drivers/edac/edac_pci_sysfs.c
40079+++ b/drivers/edac/edac_pci_sysfs.c
40080@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40081 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40082 static int edac_pci_poll_msec = 1000; /* one second workq period */
40083
40084-static atomic_t pci_parity_count = ATOMIC_INIT(0);
40085-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40086+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40087+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40088
40089 static struct kobject *edac_pci_top_main_kobj;
40090 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40091@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
40092 void *value;
40093 ssize_t(*show) (void *, char *);
40094 ssize_t(*store) (void *, const char *, size_t);
40095-};
40096+} __do_const;
40097
40098 /* Set of show/store abstract level functions for PCI Parity object */
40099 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40100@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40101 edac_printk(KERN_CRIT, EDAC_PCI,
40102 "Signaled System Error on %s\n",
40103 pci_name(dev));
40104- atomic_inc(&pci_nonparity_count);
40105+ atomic_inc_unchecked(&pci_nonparity_count);
40106 }
40107
40108 if (status & (PCI_STATUS_PARITY)) {
40109@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40110 "Master Data Parity Error on %s\n",
40111 pci_name(dev));
40112
40113- atomic_inc(&pci_parity_count);
40114+ atomic_inc_unchecked(&pci_parity_count);
40115 }
40116
40117 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40118@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40119 "Detected Parity Error on %s\n",
40120 pci_name(dev));
40121
40122- atomic_inc(&pci_parity_count);
40123+ atomic_inc_unchecked(&pci_parity_count);
40124 }
40125 }
40126
40127@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40128 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40129 "Signaled System Error on %s\n",
40130 pci_name(dev));
40131- atomic_inc(&pci_nonparity_count);
40132+ atomic_inc_unchecked(&pci_nonparity_count);
40133 }
40134
40135 if (status & (PCI_STATUS_PARITY)) {
40136@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40137 "Master Data Parity Error on "
40138 "%s\n", pci_name(dev));
40139
40140- atomic_inc(&pci_parity_count);
40141+ atomic_inc_unchecked(&pci_parity_count);
40142 }
40143
40144 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40145@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40146 "Detected Parity Error on %s\n",
40147 pci_name(dev));
40148
40149- atomic_inc(&pci_parity_count);
40150+ atomic_inc_unchecked(&pci_parity_count);
40151 }
40152 }
40153 }
40154@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40155 if (!check_pci_errors)
40156 return;
40157
40158- before_count = atomic_read(&pci_parity_count);
40159+ before_count = atomic_read_unchecked(&pci_parity_count);
40160
40161 /* scan all PCI devices looking for a Parity Error on devices and
40162 * bridges.
40163@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40164 /* Only if operator has selected panic on PCI Error */
40165 if (edac_pci_get_panic_on_pe()) {
40166 /* If the count is different 'after' from 'before' */
40167- if (before_count != atomic_read(&pci_parity_count))
40168+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40169 panic("EDAC: PCI Parity Error");
40170 }
40171 }
40172diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40173index c2359a1..8bd119d 100644
40174--- a/drivers/edac/mce_amd.h
40175+++ b/drivers/edac/mce_amd.h
40176@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40177 bool (*mc0_mce)(u16, u8);
40178 bool (*mc1_mce)(u16, u8);
40179 bool (*mc2_mce)(u16, u8);
40180-};
40181+} __no_const;
40182
40183 void amd_report_gart_errors(bool);
40184 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40185diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40186index 57ea7f4..af06b76 100644
40187--- a/drivers/firewire/core-card.c
40188+++ b/drivers/firewire/core-card.c
40189@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40190 const struct fw_card_driver *driver,
40191 struct device *device)
40192 {
40193- static atomic_t index = ATOMIC_INIT(-1);
40194+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40195
40196- card->index = atomic_inc_return(&index);
40197+ card->index = atomic_inc_return_unchecked(&index);
40198 card->driver = driver;
40199 card->device = device;
40200 card->current_tlabel = 0;
40201@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40202
40203 void fw_core_remove_card(struct fw_card *card)
40204 {
40205- struct fw_card_driver dummy_driver = dummy_driver_template;
40206+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40207
40208 card->driver->update_phy_reg(card, 4,
40209 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40210diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40211index f9e3aee..269dbdb 100644
40212--- a/drivers/firewire/core-device.c
40213+++ b/drivers/firewire/core-device.c
40214@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40215 struct config_rom_attribute {
40216 struct device_attribute attr;
40217 u32 key;
40218-};
40219+} __do_const;
40220
40221 static ssize_t show_immediate(struct device *dev,
40222 struct device_attribute *dattr, char *buf)
40223diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40224index d6a09b9..18e90dd 100644
40225--- a/drivers/firewire/core-transaction.c
40226+++ b/drivers/firewire/core-transaction.c
40227@@ -38,6 +38,7 @@
40228 #include <linux/timer.h>
40229 #include <linux/types.h>
40230 #include <linux/workqueue.h>
40231+#include <linux/sched.h>
40232
40233 #include <asm/byteorder.h>
40234
40235diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40236index e1480ff6..1a429bd 100644
40237--- a/drivers/firewire/core.h
40238+++ b/drivers/firewire/core.h
40239@@ -111,6 +111,7 @@ struct fw_card_driver {
40240
40241 int (*stop_iso)(struct fw_iso_context *ctx);
40242 };
40243+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40244
40245 void fw_card_initialize(struct fw_card *card,
40246 const struct fw_card_driver *driver, struct device *device);
40247diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40248index f51d376..b118e40 100644
40249--- a/drivers/firewire/ohci.c
40250+++ b/drivers/firewire/ohci.c
40251@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40252 be32_to_cpu(ohci->next_header));
40253 }
40254
40255+#ifndef CONFIG_GRKERNSEC
40256 if (param_remote_dma) {
40257 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40258 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40259 }
40260+#endif
40261
40262 spin_unlock_irq(&ohci->lock);
40263
40264@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40265 unsigned long flags;
40266 int n, ret = 0;
40267
40268+#ifndef CONFIG_GRKERNSEC
40269 if (param_remote_dma)
40270 return 0;
40271+#endif
40272
40273 /*
40274 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40275diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40276index 94a58a0..f5eba42 100644
40277--- a/drivers/firmware/dmi-id.c
40278+++ b/drivers/firmware/dmi-id.c
40279@@ -16,7 +16,7 @@
40280 struct dmi_device_attribute{
40281 struct device_attribute dev_attr;
40282 int field;
40283-};
40284+} __do_const;
40285 #define to_dmi_dev_attr(_dev_attr) \
40286 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40287
40288diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40289index 2eebd28b..4261350 100644
40290--- a/drivers/firmware/dmi_scan.c
40291+++ b/drivers/firmware/dmi_scan.c
40292@@ -893,7 +893,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40293 if (buf == NULL)
40294 return -1;
40295
40296- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40297+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40298
40299 dmi_unmap(buf);
40300 return 0;
40301diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40302index 4fd9961..52d60ce 100644
40303--- a/drivers/firmware/efi/cper.c
40304+++ b/drivers/firmware/efi/cper.c
40305@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40306 */
40307 u64 cper_next_record_id(void)
40308 {
40309- static atomic64_t seq;
40310+ static atomic64_unchecked_t seq;
40311
40312- if (!atomic64_read(&seq))
40313- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40314+ if (!atomic64_read_unchecked(&seq))
40315+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40316
40317- return atomic64_inc_return(&seq);
40318+ return atomic64_inc_return_unchecked(&seq);
40319 }
40320 EXPORT_SYMBOL_GPL(cper_next_record_id);
40321
40322diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40323index 3061bb8..92b5fcc 100644
40324--- a/drivers/firmware/efi/efi.c
40325+++ b/drivers/firmware/efi/efi.c
40326@@ -160,14 +160,16 @@ static struct attribute_group efi_subsys_attr_group = {
40327 };
40328
40329 static struct efivars generic_efivars;
40330-static struct efivar_operations generic_ops;
40331+static efivar_operations_no_const generic_ops __read_only;
40332
40333 static int generic_ops_register(void)
40334 {
40335- generic_ops.get_variable = efi.get_variable;
40336- generic_ops.set_variable = efi.set_variable;
40337- generic_ops.get_next_variable = efi.get_next_variable;
40338- generic_ops.query_variable_store = efi_query_variable_store;
40339+ pax_open_kernel();
40340+ *(void **)&generic_ops.get_variable = efi.get_variable;
40341+ *(void **)&generic_ops.set_variable = efi.set_variable;
40342+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40343+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40344+ pax_close_kernel();
40345
40346 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40347 }
40348diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40349index 7b2e049..a253334 100644
40350--- a/drivers/firmware/efi/efivars.c
40351+++ b/drivers/firmware/efi/efivars.c
40352@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40353 static int
40354 create_efivars_bin_attributes(void)
40355 {
40356- struct bin_attribute *attr;
40357+ bin_attribute_no_const *attr;
40358 int error;
40359
40360 /* new_var */
40361diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
40362index 87b8e3b..c4afb35 100644
40363--- a/drivers/firmware/efi/runtime-map.c
40364+++ b/drivers/firmware/efi/runtime-map.c
40365@@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
40366 kfree(entry);
40367 }
40368
40369-static struct kobj_type __refdata map_ktype = {
40370+static const struct kobj_type __refconst map_ktype = {
40371 .sysfs_ops = &map_attr_ops,
40372 .default_attrs = def_attrs,
40373 .release = map_release,
40374diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
40375index f1ab05e..ab51228 100644
40376--- a/drivers/firmware/google/gsmi.c
40377+++ b/drivers/firmware/google/gsmi.c
40378@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
40379 return local_hash_64(input, 32);
40380 }
40381
40382-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
40383+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
40384 {
40385 .ident = "Google Board",
40386 .matches = {
40387diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40388index 2f569aa..26e4f39 100644
40389--- a/drivers/firmware/google/memconsole.c
40390+++ b/drivers/firmware/google/memconsole.c
40391@@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
40392 return false;
40393 }
40394
40395-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
40396+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
40397 {
40398 .ident = "Google Board",
40399 .matches = {
40400@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40401 if (!found_memconsole())
40402 return -ENODEV;
40403
40404- memconsole_bin_attr.size = memconsole_length;
40405+ pax_open_kernel();
40406+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40407+ pax_close_kernel();
40408+
40409 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40410 }
40411
40412diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
40413index cc016c61..d35279e 100644
40414--- a/drivers/firmware/memmap.c
40415+++ b/drivers/firmware/memmap.c
40416@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
40417 kfree(entry);
40418 }
40419
40420-static struct kobj_type __refdata memmap_ktype = {
40421+static const struct kobj_type __refconst memmap_ktype = {
40422 .release = release_firmware_map_entry,
40423 .sysfs_ops = &memmap_attr_ops,
40424 .default_attrs = def_attrs,
40425diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40426index 3cfcfc6..09d6f117 100644
40427--- a/drivers/gpio/gpio-em.c
40428+++ b/drivers/gpio/gpio-em.c
40429@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40430 struct em_gio_priv *p;
40431 struct resource *io[2], *irq[2];
40432 struct gpio_chip *gpio_chip;
40433- struct irq_chip *irq_chip;
40434+ irq_chip_no_const *irq_chip;
40435 const char *name = dev_name(&pdev->dev);
40436 int ret;
40437
40438diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40439index 7818cd1..1be40e5 100644
40440--- a/drivers/gpio/gpio-ich.c
40441+++ b/drivers/gpio/gpio-ich.c
40442@@ -94,7 +94,7 @@ struct ichx_desc {
40443 * this option allows driver caching written output values
40444 */
40445 bool use_outlvl_cache;
40446-};
40447+} __do_const;
40448
40449 static struct {
40450 spinlock_t lock;
40451diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40452index f476ae2..05e1bdd 100644
40453--- a/drivers/gpio/gpio-omap.c
40454+++ b/drivers/gpio/gpio-omap.c
40455@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40456 const struct omap_gpio_platform_data *pdata;
40457 struct resource *res;
40458 struct gpio_bank *bank;
40459- struct irq_chip *irqc;
40460+ irq_chip_no_const *irqc;
40461 int ret;
40462
40463 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40464diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40465index c49522e..9a7ee54 100644
40466--- a/drivers/gpio/gpio-rcar.c
40467+++ b/drivers/gpio/gpio-rcar.c
40468@@ -348,7 +348,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40469 struct gpio_rcar_priv *p;
40470 struct resource *io, *irq;
40471 struct gpio_chip *gpio_chip;
40472- struct irq_chip *irq_chip;
40473+ irq_chip_no_const *irq_chip;
40474 struct device *dev = &pdev->dev;
40475 const char *name = dev_name(dev);
40476 int ret;
40477diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40478index c1caa45..f0f97d2 100644
40479--- a/drivers/gpio/gpio-vr41xx.c
40480+++ b/drivers/gpio/gpio-vr41xx.c
40481@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40482 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40483 maskl, pendl, maskh, pendh);
40484
40485- atomic_inc(&irq_err_count);
40486+ atomic_inc_unchecked(&irq_err_count);
40487
40488 return -EINVAL;
40489 }
40490diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40491index 1ca9295..9f3d481 100644
40492--- a/drivers/gpio/gpiolib.c
40493+++ b/drivers/gpio/gpiolib.c
40494@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40495 }
40496
40497 if (gpiochip->irqchip) {
40498- gpiochip->irqchip->irq_request_resources = NULL;
40499- gpiochip->irqchip->irq_release_resources = NULL;
40500+ pax_open_kernel();
40501+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40502+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40503+ pax_close_kernel();
40504 gpiochip->irqchip = NULL;
40505 }
40506 }
40507@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40508 gpiochip->irqchip = NULL;
40509 return -EINVAL;
40510 }
40511- irqchip->irq_request_resources = gpiochip_irq_reqres;
40512- irqchip->irq_release_resources = gpiochip_irq_relres;
40513+
40514+ pax_open_kernel();
40515+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40516+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40517+ pax_close_kernel();
40518
40519 /*
40520 * Prepare the mapping since the irqchip shall be orthogonal to
40521diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40522index 488f51d..301d462 100644
40523--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40524+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40525@@ -118,7 +118,7 @@ struct device_queue_manager_ops {
40526 enum cache_policy alternate_policy,
40527 void __user *alternate_aperture_base,
40528 uint64_t alternate_aperture_size);
40529-};
40530+} __no_const;
40531
40532 /**
40533 * struct device_queue_manager
40534diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40535index 5940531..a75b0e5 100644
40536--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40537+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40538@@ -62,7 +62,7 @@ struct kernel_queue_ops {
40539
40540 void (*submit_packet)(struct kernel_queue *kq);
40541 void (*rollback_packet)(struct kernel_queue *kq);
40542-};
40543+} __no_const;
40544
40545 struct kernel_queue {
40546 struct kernel_queue_ops ops;
40547diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
40548index 9b23525..09af26c 100644
40549--- a/drivers/gpu/drm/drm_context.c
40550+++ b/drivers/gpu/drm/drm_context.c
40551@@ -53,6 +53,9 @@ struct drm_ctx_list {
40552 */
40553 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
40554 {
40555+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40556+	return;
40557+
40558 mutex_lock(&dev->struct_mutex);
40559 idr_remove(&dev->ctx_idr, ctx_handle);
40560 mutex_unlock(&dev->struct_mutex);
40561@@ -87,6 +90,9 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
40562 */
40563 int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40564 {
40565+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40566+ return -EINVAL;
40567+
40568 idr_init(&dev->ctx_idr);
40569 return 0;
40570 }
40571@@ -101,6 +107,9 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40572 */
40573 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
40574 {
40575+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40576+	return;
40577+
40578 mutex_lock(&dev->struct_mutex);
40579 idr_destroy(&dev->ctx_idr);
40580 mutex_unlock(&dev->struct_mutex);
40581@@ -119,11 +128,14 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
40582 {
40583 struct drm_ctx_list *pos, *tmp;
40584
40585+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40586+	return;
40587+
40588 mutex_lock(&dev->ctxlist_mutex);
40589
40590 list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
40591 if (pos->tag == file &&
40592- pos->handle != DRM_KERNEL_CONTEXT) {
40593+ _DRM_LOCKING_CONTEXT(pos->handle) != DRM_KERNEL_CONTEXT) {
40594 if (dev->driver->context_dtor)
40595 dev->driver->context_dtor(dev, pos->handle);
40596
40597@@ -161,6 +173,9 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
40598 struct drm_local_map *map;
40599 struct drm_map_list *_entry;
40600
40601+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40602+ return -EINVAL;
40603+
40604 mutex_lock(&dev->struct_mutex);
40605
40606 map = idr_find(&dev->ctx_idr, request->ctx_id);
40607@@ -205,6 +220,9 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
40608 struct drm_local_map *map = NULL;
40609 struct drm_map_list *r_list = NULL;
40610
40611+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40612+ return -EINVAL;
40613+
40614 mutex_lock(&dev->struct_mutex);
40615 list_for_each_entry(r_list, &dev->maplist, head) {
40616 if (r_list->map
40617@@ -277,7 +295,13 @@ static int drm_context_switch_complete(struct drm_device *dev,
40618 {
40619 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
40620
40621- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40622+ if (file_priv->master->lock.hw_lock == NULL) {
40623+ DRM_ERROR(
40624+ "Device has been unregistered. Hard exit. Process %d\n",
40625+ task_pid_nr(current));
40626+ send_sig(SIGTERM, current, 0);
40627+ return -EPERM;
40628+ } else if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40629 DRM_ERROR("Lock isn't held after context switch\n");
40630 }
40631
40632@@ -305,6 +329,9 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
40633 struct drm_ctx ctx;
40634 int i;
40635
40636+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40637+ return -EINVAL;
40638+
40639 if (res->count >= DRM_RESERVED_CONTEXTS) {
40640 memset(&ctx, 0, sizeof(ctx));
40641 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
40642@@ -335,8 +362,11 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
40643 struct drm_ctx_list *ctx_entry;
40644 struct drm_ctx *ctx = data;
40645
40646+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40647+ return -EINVAL;
40648+
40649 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40650- if (ctx->handle == DRM_KERNEL_CONTEXT) {
40651+ if (_DRM_LOCKING_CONTEXT(ctx->handle) == DRM_KERNEL_CONTEXT) {
40652 /* Skip kernel's context and get a new one. */
40653 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40654 }
40655@@ -378,6 +408,9 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
40656 {
40657 struct drm_ctx *ctx = data;
40658
40659+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40660+ return -EINVAL;
40661+
40662 /* This is 0, because we don't handle any context flags */
40663 ctx->flags = 0;
40664
40665@@ -400,6 +433,9 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
40666 {
40667 struct drm_ctx *ctx = data;
40668
40669+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40670+ return -EINVAL;
40671+
40672 DRM_DEBUG("%d\n", ctx->handle);
40673 return drm_context_switch(dev, dev->last_context, ctx->handle);
40674 }
40675@@ -420,6 +456,9 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
40676 {
40677 struct drm_ctx *ctx = data;
40678
40679+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40680+ return -EINVAL;
40681+
40682 DRM_DEBUG("%d\n", ctx->handle);
40683 drm_context_switch_complete(dev, file_priv, ctx->handle);
40684
40685@@ -442,8 +481,11 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
40686 {
40687 struct drm_ctx *ctx = data;
40688
40689+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40690+ return -EINVAL;
40691+
40692 DRM_DEBUG("%d\n", ctx->handle);
40693- if (ctx->handle != DRM_KERNEL_CONTEXT) {
40694+ if (_DRM_LOCKING_CONTEXT(ctx->handle) != DRM_KERNEL_CONTEXT) {
40695 if (dev->driver->context_dtor)
40696 dev->driver->context_dtor(dev, ctx->handle);
40697 drm_legacy_ctxbitmap_free(dev, ctx->handle);
40698diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40699index b6f076b..2918de2 100644
40700--- a/drivers/gpu/drm/drm_crtc.c
40701+++ b/drivers/gpu/drm/drm_crtc.c
40702@@ -4118,7 +4118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40703 goto done;
40704 }
40705
40706- if (copy_to_user(&enum_ptr[copied].name,
40707+ if (copy_to_user(enum_ptr[copied].name,
40708 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40709 ret = -EFAULT;
40710 goto done;
40711diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40712index d512134..a80a8e4 100644
40713--- a/drivers/gpu/drm/drm_drv.c
40714+++ b/drivers/gpu/drm/drm_drv.c
40715@@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
40716
40717 drm_device_set_unplugged(dev);
40718
40719- if (dev->open_count == 0) {
40720+ if (local_read(&dev->open_count) == 0) {
40721 drm_put_dev(dev);
40722 }
40723 mutex_unlock(&drm_global_mutex);
40724@@ -596,10 +596,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
40725 if (drm_ht_create(&dev->map_hash, 12))
40726 goto err_minors;
40727
40728- ret = drm_legacy_ctxbitmap_init(dev);
40729- if (ret) {
40730- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
40731- goto err_ht;
40732+ if (drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT)) {
40733+ ret = drm_legacy_ctxbitmap_init(dev);
40734+ if (ret) {
40735+ DRM_ERROR(
40736+ "Cannot allocate memory for context bitmap.\n");
40737+ goto err_ht;
40738+ }
40739 }
40740
40741 if (drm_core_check_feature(dev, DRIVER_GEM)) {
40742diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40743index 076dd60..e4a4ba7 100644
40744--- a/drivers/gpu/drm/drm_fops.c
40745+++ b/drivers/gpu/drm/drm_fops.c
40746@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40747 return PTR_ERR(minor);
40748
40749 dev = minor->dev;
40750- if (!dev->open_count++)
40751+ if (local_inc_return(&dev->open_count) == 1)
40752 need_setup = 1;
40753
40754 /* share address_space across all char-devs of a single device */
40755@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40756 return 0;
40757
40758 err_undo:
40759- dev->open_count--;
40760+ local_dec(&dev->open_count);
40761 drm_minor_release(minor);
40762 return retcode;
40763 }
40764@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40765
40766 mutex_lock(&drm_global_mutex);
40767
40768- DRM_DEBUG("open_count = %d\n", dev->open_count);
40769+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40770
40771 mutex_lock(&dev->struct_mutex);
40772 list_del(&file_priv->lhead);
40773@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40774 * Begin inline drm_release
40775 */
40776
40777- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40778+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40779 task_pid_nr(current),
40780 (long)old_encode_dev(file_priv->minor->kdev->devt),
40781- dev->open_count);
40782+ local_read(&dev->open_count));
40783
40784 /* Release any auth tokens that might point to this file_priv,
40785 (do that under the drm_global_mutex) */
40786@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40787 * End inline drm_release
40788 */
40789
40790- if (!--dev->open_count) {
40791+ if (local_dec_and_test(&dev->open_count)) {
40792 retcode = drm_lastclose(dev);
40793 if (drm_device_is_unplugged(dev))
40794 drm_put_dev(dev);
40795diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40796index 3d2e91c..d31c4c9 100644
40797--- a/drivers/gpu/drm/drm_global.c
40798+++ b/drivers/gpu/drm/drm_global.c
40799@@ -36,7 +36,7 @@
40800 struct drm_global_item {
40801 struct mutex mutex;
40802 void *object;
40803- int refcount;
40804+ atomic_t refcount;
40805 };
40806
40807 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40808@@ -49,7 +49,7 @@ void drm_global_init(void)
40809 struct drm_global_item *item = &glob[i];
40810 mutex_init(&item->mutex);
40811 item->object = NULL;
40812- item->refcount = 0;
40813+ atomic_set(&item->refcount, 0);
40814 }
40815 }
40816
40817@@ -59,7 +59,7 @@ void drm_global_release(void)
40818 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40819 struct drm_global_item *item = &glob[i];
40820 BUG_ON(item->object != NULL);
40821- BUG_ON(item->refcount != 0);
40822+ BUG_ON(atomic_read(&item->refcount) != 0);
40823 }
40824 }
40825
40826@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40827 struct drm_global_item *item = &glob[ref->global_type];
40828
40829 mutex_lock(&item->mutex);
40830- if (item->refcount == 0) {
40831+ if (atomic_read(&item->refcount) == 0) {
40832 item->object = kzalloc(ref->size, GFP_KERNEL);
40833 if (unlikely(item->object == NULL)) {
40834 ret = -ENOMEM;
40835@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40836 goto out_err;
40837
40838 }
40839- ++item->refcount;
40840+ atomic_inc(&item->refcount);
40841 ref->object = item->object;
40842 mutex_unlock(&item->mutex);
40843 return 0;
40844@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40845 struct drm_global_item *item = &glob[ref->global_type];
40846
40847 mutex_lock(&item->mutex);
40848- BUG_ON(item->refcount == 0);
40849+ BUG_ON(atomic_read(&item->refcount) == 0);
40850 BUG_ON(ref->object != item->object);
40851- if (--item->refcount == 0) {
40852+ if (atomic_dec_and_test(&item->refcount)) {
40853 ref->release(ref);
40854 item->object = NULL;
40855 }
40856diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40857index f1b32f9..394f791 100644
40858--- a/drivers/gpu/drm/drm_info.c
40859+++ b/drivers/gpu/drm/drm_info.c
40860@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40861 struct drm_local_map *map;
40862 struct drm_map_list *r_list;
40863
40864- /* Hardcoded from _DRM_FRAME_BUFFER,
40865- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40866- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40867- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40868+ static const char * const types[] = {
40869+ [_DRM_FRAME_BUFFER] = "FB",
40870+ [_DRM_REGISTERS] = "REG",
40871+ [_DRM_SHM] = "SHM",
40872+ [_DRM_AGP] = "AGP",
40873+ [_DRM_SCATTER_GATHER] = "SG",
40874+ [_DRM_CONSISTENT] = "PCI"};
40875 const char *type;
40876 int i;
40877
40878@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40879 map = r_list->map;
40880 if (!map)
40881 continue;
40882- if (map->type < 0 || map->type > 5)
40883+ if (map->type >= ARRAY_SIZE(types))
40884 type = "??";
40885 else
40886 type = types[map->type];
40887diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40888index 2f4c4343..dd12cd2 100644
40889--- a/drivers/gpu/drm/drm_ioc32.c
40890+++ b/drivers/gpu/drm/drm_ioc32.c
40891@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40892 request = compat_alloc_user_space(nbytes);
40893 if (!access_ok(VERIFY_WRITE, request, nbytes))
40894 return -EFAULT;
40895- list = (struct drm_buf_desc *) (request + 1);
40896+ list = (struct drm_buf_desc __user *) (request + 1);
40897
40898 if (__put_user(count, &request->count)
40899 || __put_user(list, &request->list))
40900@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40901 request = compat_alloc_user_space(nbytes);
40902 if (!access_ok(VERIFY_WRITE, request, nbytes))
40903 return -EFAULT;
40904- list = (struct drm_buf_pub *) (request + 1);
40905+ list = (struct drm_buf_pub __user *) (request + 1);
40906
40907 if (__put_user(count, &request->count)
40908 || __put_user(list, &request->list))
40909@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40910 return 0;
40911 }
40912
40913-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40914+drm_ioctl_compat_t drm_compat_ioctls[] = {
40915 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40916 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40917 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40918@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40919 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40920 {
40921 unsigned int nr = DRM_IOCTL_NR(cmd);
40922- drm_ioctl_compat_t *fn;
40923 int ret;
40924
40925 /* Assume that ioctls without an explicit compat routine will just
40926@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40927 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40928 return drm_ioctl(filp, cmd, arg);
40929
40930- fn = drm_compat_ioctls[nr];
40931-
40932- if (fn != NULL)
40933- ret = (*fn) (filp, cmd, arg);
40934+ if (drm_compat_ioctls[nr] != NULL)
40935+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40936 else
40937 ret = drm_ioctl(filp, cmd, arg);
40938
40939diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40940index 3785d66..1c489ef 100644
40941--- a/drivers/gpu/drm/drm_ioctl.c
40942+++ b/drivers/gpu/drm/drm_ioctl.c
40943@@ -655,7 +655,7 @@ long drm_ioctl(struct file *filp,
40944 struct drm_file *file_priv = filp->private_data;
40945 struct drm_device *dev;
40946 const struct drm_ioctl_desc *ioctl = NULL;
40947- drm_ioctl_t *func;
40948+ drm_ioctl_no_const_t func;
40949 unsigned int nr = DRM_IOCTL_NR(cmd);
40950 int retcode = -EINVAL;
40951 char stack_kdata[128];
40952diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
40953index f861361..b61d4c7 100644
40954--- a/drivers/gpu/drm/drm_lock.c
40955+++ b/drivers/gpu/drm/drm_lock.c
40956@@ -61,9 +61,12 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
40957 struct drm_master *master = file_priv->master;
40958 int ret = 0;
40959
40960+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40961+ return -EINVAL;
40962+
40963 ++file_priv->lock_count;
40964
40965- if (lock->context == DRM_KERNEL_CONTEXT) {
40966+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
40967 DRM_ERROR("Process %d using kernel context %d\n",
40968 task_pid_nr(current), lock->context);
40969 return -EINVAL;
40970@@ -153,12 +156,23 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
40971 struct drm_lock *lock = data;
40972 struct drm_master *master = file_priv->master;
40973
40974- if (lock->context == DRM_KERNEL_CONTEXT) {
40975+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40976+ return -EINVAL;
40977+
40978+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
40979 DRM_ERROR("Process %d using kernel context %d\n",
40980 task_pid_nr(current), lock->context);
40981 return -EINVAL;
40982 }
40983
40984+ if (!master->lock.hw_lock) {
40985+ DRM_ERROR(
40986+ "Device has been unregistered. Hard exit. Process %d\n",
40987+ task_pid_nr(current));
40988+ send_sig(SIGTERM, current, 0);
40989+ return -EPERM;
40990+ }
40991+
40992 if (drm_legacy_lock_free(&master->lock, lock->context)) {
40993 /* FIXME: Should really bail out here. */
40994 }
40995diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40996index d4813e0..6c1ab4d 100644
40997--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40998+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40999@@ -825,10 +825,16 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
41000 u32 pipeconf_reg = PIPEACONF;
41001 u32 dspcntr_reg = DSPACNTR;
41002
41003- u32 pipeconf = dev_priv->pipeconf[pipe];
41004- u32 dspcntr = dev_priv->dspcntr[pipe];
41005+ u32 pipeconf;
41006+ u32 dspcntr;
41007 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
41008
41009+ if (pipe == -1)
41010+ return;
41011+
41012+ pipeconf = dev_priv->pipeconf[pipe];
41013+ dspcntr = dev_priv->dspcntr[pipe];
41014+
41015 if (pipe) {
41016 pipeconf_reg = PIPECCONF;
41017 dspcntr_reg = DSPCCNTR;
41018diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
41019index 93ec5dc..82acbaf 100644
41020--- a/drivers/gpu/drm/i810/i810_drv.h
41021+++ b/drivers/gpu/drm/i810/i810_drv.h
41022@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
41023 int page_flipping;
41024
41025 wait_queue_head_t irq_queue;
41026- atomic_t irq_received;
41027- atomic_t irq_emitted;
41028+ atomic_unchecked_t irq_received;
41029+ atomic_unchecked_t irq_emitted;
41030
41031 int front_offset;
41032 } drm_i810_private_t;
41033diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
41034index 1a46787..7fb387c 100644
41035--- a/drivers/gpu/drm/i915/i915_dma.c
41036+++ b/drivers/gpu/drm/i915/i915_dma.c
41037@@ -149,6 +149,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
41038 case I915_PARAM_MMAP_VERSION:
41039 value = 1;
41040 break;
41041+ case I915_PARAM_HAS_LEGACY_CONTEXT:
41042+ value = drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT);
41043+ break;
41044 default:
41045 DRM_DEBUG("Unknown parameter %d\n", param->param);
41046 return -EINVAL;
41047@@ -362,7 +365,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
41048 * locking inversion with the driver load path. And the access here is
41049 * completely racy anyway. So don't bother with locking for now.
41050 */
41051- return dev->open_count == 0;
41052+ return local_read(&dev->open_count) == 0;
41053 }
41054
41055 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
41056diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41057index 38a7425..5322b16 100644
41058--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41059+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41060@@ -872,12 +872,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
41061 static int
41062 validate_exec_list(struct drm_device *dev,
41063 struct drm_i915_gem_exec_object2 *exec,
41064- int count)
41065+ unsigned int count)
41066 {
41067 unsigned relocs_total = 0;
41068 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
41069 unsigned invalid_flags;
41070- int i;
41071+ unsigned int i;
41072
41073 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
41074 if (USES_FULL_PPGTT(dev))
41075diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
41076index 176de63..b50b66a 100644
41077--- a/drivers/gpu/drm/i915/i915_ioc32.c
41078+++ b/drivers/gpu/drm/i915/i915_ioc32.c
41079@@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
41080 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
41081 || __put_user(batchbuffer32.num_cliprects,
41082 &batchbuffer->num_cliprects)
41083- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
41084+ || __put_user((struct drm_clip_rect __user *)(unsigned long)batchbuffer32.cliprects,
41085 &batchbuffer->cliprects))
41086 return -EFAULT;
41087
41088@@ -91,13 +91,13 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
41089
41090 cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
41091 if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
41092- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
41093+ || __put_user((char __user *)(unsigned long)cmdbuffer32.buf,
41094 &cmdbuffer->buf)
41095 || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
41096 || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
41097 || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
41098 || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
41099- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
41100+ || __put_user((struct drm_clip_rect __user *)(unsigned long)cmdbuffer32.cliprects,
41101 &cmdbuffer->cliprects))
41102 return -EFAULT;
41103
41104@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
41105 (unsigned long)request);
41106 }
41107
41108-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41109+static drm_ioctl_compat_t i915_compat_ioctls[] = {
41110 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
41111 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
41112 [DRM_I915_GETPARAM] = compat_i915_getparam,
41113@@ -201,17 +201,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41114 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41115 {
41116 unsigned int nr = DRM_IOCTL_NR(cmd);
41117- drm_ioctl_compat_t *fn = NULL;
41118 int ret;
41119
41120 if (nr < DRM_COMMAND_BASE)
41121 return drm_compat_ioctl(filp, cmd, arg);
41122
41123- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
41124- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41125-
41126- if (fn != NULL)
41127- ret = (*fn) (filp, cmd, arg);
41128+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
41129+ ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg);
41130 else
41131 ret = drm_ioctl(filp, cmd, arg);
41132
41133diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
41134index f75173c..f283e45 100644
41135--- a/drivers/gpu/drm/i915/intel_display.c
41136+++ b/drivers/gpu/drm/i915/intel_display.c
41137@@ -13056,13 +13056,13 @@ struct intel_quirk {
41138 int subsystem_vendor;
41139 int subsystem_device;
41140 void (*hook)(struct drm_device *dev);
41141-};
41142+} __do_const;
41143
41144 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
41145 struct intel_dmi_quirk {
41146 void (*hook)(struct drm_device *dev);
41147 const struct dmi_system_id (*dmi_id_list)[];
41148-};
41149+} __do_const;
41150
41151 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41152 {
41153@@ -13070,18 +13070,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41154 return 1;
41155 }
41156
41157-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41158+static const struct dmi_system_id intel_dmi_quirks_table[] = {
41159 {
41160- .dmi_id_list = &(const struct dmi_system_id[]) {
41161- {
41162- .callback = intel_dmi_reverse_brightness,
41163- .ident = "NCR Corporation",
41164- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41165- DMI_MATCH(DMI_PRODUCT_NAME, ""),
41166- },
41167- },
41168- { } /* terminating entry */
41169+ .callback = intel_dmi_reverse_brightness,
41170+ .ident = "NCR Corporation",
41171+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41172+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
41173 },
41174+ },
41175+ { } /* terminating entry */
41176+};
41177+
41178+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41179+ {
41180+ .dmi_id_list = &intel_dmi_quirks_table,
41181 .hook = quirk_invert_brightness,
41182 },
41183 };
41184diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
41185index a002f53..0d60514 100644
41186--- a/drivers/gpu/drm/imx/imx-drm-core.c
41187+++ b/drivers/gpu/drm/imx/imx-drm-core.c
41188@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
41189 if (imxdrm->pipes >= MAX_CRTC)
41190 return -EINVAL;
41191
41192- if (imxdrm->drm->open_count)
41193+ if (local_read(&imxdrm->drm->open_count))
41194 return -EBUSY;
41195
41196 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
41197diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
41198index b4a20149..219ab78 100644
41199--- a/drivers/gpu/drm/mga/mga_drv.h
41200+++ b/drivers/gpu/drm/mga/mga_drv.h
41201@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
41202 u32 clear_cmd;
41203 u32 maccess;
41204
41205- atomic_t vbl_received; /**< Number of vblanks received. */
41206+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
41207 wait_queue_head_t fence_queue;
41208- atomic_t last_fence_retired;
41209+ atomic_unchecked_t last_fence_retired;
41210 u32 next_fence_to_post;
41211
41212 unsigned int fb_cpp;
41213diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
41214index 729bfd5..14bae78 100644
41215--- a/drivers/gpu/drm/mga/mga_ioc32.c
41216+++ b/drivers/gpu/drm/mga/mga_ioc32.c
41217@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
41218 return 0;
41219 }
41220
41221-drm_ioctl_compat_t *mga_compat_ioctls[] = {
41222+drm_ioctl_compat_t mga_compat_ioctls[] = {
41223 [DRM_MGA_INIT] = compat_mga_init,
41224 [DRM_MGA_GETPARAM] = compat_mga_getparam,
41225 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
41226@@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
41227 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41228 {
41229 unsigned int nr = DRM_IOCTL_NR(cmd);
41230- drm_ioctl_compat_t *fn = NULL;
41231 int ret;
41232
41233 if (nr < DRM_COMMAND_BASE)
41234 return drm_compat_ioctl(filp, cmd, arg);
41235
41236- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
41237- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41238-
41239- if (fn != NULL)
41240- ret = (*fn) (filp, cmd, arg);
41241+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
41242+ ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41243 else
41244 ret = drm_ioctl(filp, cmd, arg);
41245
41246diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
41247index 1b071b8..de8601a 100644
41248--- a/drivers/gpu/drm/mga/mga_irq.c
41249+++ b/drivers/gpu/drm/mga/mga_irq.c
41250@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
41251 if (crtc != 0)
41252 return 0;
41253
41254- return atomic_read(&dev_priv->vbl_received);
41255+ return atomic_read_unchecked(&dev_priv->vbl_received);
41256 }
41257
41258
41259@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41260 /* VBLANK interrupt */
41261 if (status & MGA_VLINEPEN) {
41262 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
41263- atomic_inc(&dev_priv->vbl_received);
41264+ atomic_inc_unchecked(&dev_priv->vbl_received);
41265 drm_handle_vblank(dev, 0);
41266 handled = 1;
41267 }
41268@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41269 if ((prim_start & ~0x03) != (prim_end & ~0x03))
41270 MGA_WRITE(MGA_PRIMEND, prim_end);
41271
41272- atomic_inc(&dev_priv->last_fence_retired);
41273+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
41274 wake_up(&dev_priv->fence_queue);
41275 handled = 1;
41276 }
41277@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41278 * using fences.
41279 */
41280 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41281- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41282+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41283 - *sequence) <= (1 << 23)));
41284
41285 *sequence = cur_fence;
41286diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41287index 0190b69..60c3eaf 100644
41288--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41289+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41290@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41291 struct bit_table {
41292 const char id;
41293 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41294-};
41295+} __no_const;
41296
41297 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41298
41299diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
41300index 8763deb..936b423 100644
41301--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
41302+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
41303@@ -940,7 +940,8 @@ static struct drm_driver
41304 driver_stub = {
41305 .driver_features =
41306 DRIVER_USE_AGP |
41307- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
41308+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
41309+ DRIVER_KMS_LEGACY_CONTEXT,
41310
41311 .load = nouveau_drm_load,
41312 .unload = nouveau_drm_unload,
41313diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41314index fc68f09..0511d71 100644
41315--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41316+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41317@@ -121,7 +121,6 @@ struct nouveau_drm {
41318 struct drm_global_reference mem_global_ref;
41319 struct ttm_bo_global_ref bo_global_ref;
41320 struct ttm_bo_device bdev;
41321- atomic_t validate_sequence;
41322 int (*move)(struct nouveau_channel *,
41323 struct ttm_buffer_object *,
41324 struct ttm_mem_reg *, struct ttm_mem_reg *);
41325diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41326index 462679a..88e32a7 100644
41327--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41328+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41329@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41330 unsigned long arg)
41331 {
41332 unsigned int nr = DRM_IOCTL_NR(cmd);
41333- drm_ioctl_compat_t *fn = NULL;
41334+ drm_ioctl_compat_t fn = NULL;
41335 int ret;
41336
41337 if (nr < DRM_COMMAND_BASE)
41338diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41339index 273e501..3b6c0a2 100644
41340--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41341+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41342@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41343 }
41344
41345 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41346- nouveau_vram_manager_init,
41347- nouveau_vram_manager_fini,
41348- nouveau_vram_manager_new,
41349- nouveau_vram_manager_del,
41350- nouveau_vram_manager_debug
41351+ .init = nouveau_vram_manager_init,
41352+ .takedown = nouveau_vram_manager_fini,
41353+ .get_node = nouveau_vram_manager_new,
41354+ .put_node = nouveau_vram_manager_del,
41355+ .debug = nouveau_vram_manager_debug
41356 };
41357
41358 static int
41359@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41360 }
41361
41362 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41363- nouveau_gart_manager_init,
41364- nouveau_gart_manager_fini,
41365- nouveau_gart_manager_new,
41366- nouveau_gart_manager_del,
41367- nouveau_gart_manager_debug
41368+ .init = nouveau_gart_manager_init,
41369+ .takedown = nouveau_gart_manager_fini,
41370+ .get_node = nouveau_gart_manager_new,
41371+ .put_node = nouveau_gart_manager_del,
41372+ .debug = nouveau_gart_manager_debug
41373 };
41374
41375 /*XXX*/
41376@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41377 }
41378
41379 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41380- nv04_gart_manager_init,
41381- nv04_gart_manager_fini,
41382- nv04_gart_manager_new,
41383- nv04_gart_manager_del,
41384- nv04_gart_manager_debug
41385+ .init = nv04_gart_manager_init,
41386+ .takedown = nv04_gart_manager_fini,
41387+ .get_node = nv04_gart_manager_new,
41388+ .put_node = nv04_gart_manager_del,
41389+ .debug = nv04_gart_manager_debug
41390 };
41391
41392 int
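The hunks above convert the positional initializers of the const ops tables to designated ones. The positional form silently mis-binds if fields are ever added or reordered (for example by the RANDSTRUCT structure-layout plugin shipped with this patch), while designated initializers bind by field name. A standalone illustration of the difference:

    struct ops {
        int  (*init)(void);
        void (*fini)(void);
    };

    static int  my_init(void) { return 0; }
    static void my_fini(void) { }

    /* survives any reordering of struct ops' members */
    static const struct ops my_ops = {
        .init = my_init,
        .fini = my_fini,
    };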
41393diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41394index c7592ec..dd45ebc 100644
41395--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41396+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41397@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41398 * locking inversion with the driver load path. And the access here is
41399 * completely racy anyway. So don't bother with locking for now.
41400 */
41401- return dev->open_count == 0;
41402+ return local_read(&dev->open_count) == 0;
41403 }
41404
41405 static const struct vga_switcheroo_client_ops
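The local_read() here assumes an earlier hunk of this patch has converted drm_device::open_count from a plain int to a local_t; the read stays deliberately lockless, as the in-context comment notes. Rough shape of the idiom, as a sketch:

    #include <asm/local.h>

    static local_t open_count = LOCAL_INIT(0);

    static bool can_switch(void)
    {
        /* tolerated-racy, lock-free probe of the open count */
        return local_read(&open_count) == 0;
    }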
41406diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41407index 9782364..89bd954 100644
41408--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41409+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41410@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41411 int ret;
41412
41413 mutex_lock(&qdev->async_io_mutex);
41414- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41415+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41416 if (qdev->last_sent_io_cmd > irq_num) {
41417 if (intr)
41418 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41419- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41420+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41421 else
41422 ret = wait_event_timeout(qdev->io_cmd_event,
41423- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41424+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41425 /* 0 is timeout, just bail the "hw" has gone away */
41426 if (ret <= 0)
41427 goto out;
41428- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41429+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41430 }
41431 outb(val, addr);
41432 qdev->last_sent_io_cmd = irq_num + 1;
41433 if (intr)
41434 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41435- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41436+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41437 else
41438 ret = wait_event_timeout(qdev->io_cmd_event,
41439- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41440+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41441 out:
41442 if (ret > 0)
41443 ret = 0;
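These are the first of many atomic_*_unchecked conversions in this section. Under PAX_REFCOUNT, plain atomic_t operations trap on overflow to stop reference-count exploits; counters whose wraparound is harmless (IRQ statistics, ID generators, sequence cookies) are moved to an unchecked variant that keeps the old non-trapping semantics. A sketch of the assumed shape of the type — the arch-specific operations mirror their atomic_t counterparts minus the overflow check:

    typedef struct {
        int counter;
    } atomic_unchecked_t;

    /* e.g., a generic fallback might read: */
    #define atomic_read_unchecked(v)    ACCESS_ONCE((v)->counter)
    #define atomic_set_unchecked(v, i)  (((v)->counter) = (i))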
41444diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41445index 6911b8c..89d6867 100644
41446--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41447+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41448@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41449 struct drm_info_node *node = (struct drm_info_node *) m->private;
41450 struct qxl_device *qdev = node->minor->dev->dev_private;
41451
41452- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41453- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41454- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41455- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41456+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41457+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41458+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41459+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41460 seq_printf(m, "%d\n", qdev->irq_received_error);
41461 return 0;
41462 }
41463diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41464index 7c6cafe..460f542 100644
41465--- a/drivers/gpu/drm/qxl/qxl_drv.h
41466+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41467@@ -290,10 +290,10 @@ struct qxl_device {
41468 unsigned int last_sent_io_cmd;
41469
41470 /* interrupt handling */
41471- atomic_t irq_received;
41472- atomic_t irq_received_display;
41473- atomic_t irq_received_cursor;
41474- atomic_t irq_received_io_cmd;
41475+ atomic_unchecked_t irq_received;
41476+ atomic_unchecked_t irq_received_display;
41477+ atomic_unchecked_t irq_received_cursor;
41478+ atomic_unchecked_t irq_received_io_cmd;
41479 unsigned irq_received_error;
41480 wait_queue_head_t display_event;
41481 wait_queue_head_t cursor_event;
41482diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41483index b110883..dd06418 100644
41484--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41485+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41486@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41487
41488 /* TODO copy slow path code from i915 */
41489 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41490- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41491+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41492
41493 {
41494 struct qxl_drawable *draw = fb_cmd;
41495@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41496 struct drm_qxl_reloc reloc;
41497
41498 if (copy_from_user(&reloc,
41499- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41500+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41501 sizeof(reloc))) {
41502 ret = -EFAULT;
41503 goto out_free_bos;
41504@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41505
41506 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41507
41508- struct drm_qxl_command *commands =
41509- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41510+ struct drm_qxl_command __user *commands =
41511+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41512
41513- if (copy_from_user(&user_cmd, &commands[cmd_num],
41514+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41515 sizeof(user_cmd)))
41516 return -EFAULT;
41517
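The casts above attach sparse address-space markers to ioctl-supplied 64-bit handles that are really userland addresses; __force_user additionally silences the checker where PaX's separate kernel/user address spaces make the conversion intentional. A hedged sketch of the pattern, using a hypothetical helper:

    /* 'handle' arrives as a u64 in an ioctl argument struct */
    static int fetch_reloc(u64 handle, struct drm_qxl_reloc *out)
    {
        struct drm_qxl_reloc __user *p =
            (struct drm_qxl_reloc __user *)(uintptr_t)handle;

        return copy_from_user(out, p, sizeof(*out)) ? -EFAULT : 0;
    }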
41518diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41519index 0bf1e20..42a7310 100644
41520--- a/drivers/gpu/drm/qxl/qxl_irq.c
41521+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41522@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41523 if (!pending)
41524 return IRQ_NONE;
41525
41526- atomic_inc(&qdev->irq_received);
41527+ atomic_inc_unchecked(&qdev->irq_received);
41528
41529 if (pending & QXL_INTERRUPT_DISPLAY) {
41530- atomic_inc(&qdev->irq_received_display);
41531+ atomic_inc_unchecked(&qdev->irq_received_display);
41532 wake_up_all(&qdev->display_event);
41533 qxl_queue_garbage_collect(qdev, false);
41534 }
41535 if (pending & QXL_INTERRUPT_CURSOR) {
41536- atomic_inc(&qdev->irq_received_cursor);
41537+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41538 wake_up_all(&qdev->cursor_event);
41539 }
41540 if (pending & QXL_INTERRUPT_IO_CMD) {
41541- atomic_inc(&qdev->irq_received_io_cmd);
41542+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41543 wake_up_all(&qdev->io_cmd_event);
41544 }
41545 if (pending & QXL_INTERRUPT_ERROR) {
41546@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41547 init_waitqueue_head(&qdev->io_cmd_event);
41548 INIT_WORK(&qdev->client_monitors_config_work,
41549 qxl_client_monitors_config_work_func);
41550- atomic_set(&qdev->irq_received, 0);
41551- atomic_set(&qdev->irq_received_display, 0);
41552- atomic_set(&qdev->irq_received_cursor, 0);
41553- atomic_set(&qdev->irq_received_io_cmd, 0);
41554+ atomic_set_unchecked(&qdev->irq_received, 0);
41555+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41556+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41557+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41558 qdev->irq_received_error = 0;
41559 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41560 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41561diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41562index 0cbc4c9..0e46686 100644
41563--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41564+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41565@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41566 }
41567 }
41568
41569-static struct vm_operations_struct qxl_ttm_vm_ops;
41570+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41571 static const struct vm_operations_struct *ttm_vm_ops;
41572
41573 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41574@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41575 return r;
41576 if (unlikely(ttm_vm_ops == NULL)) {
41577 ttm_vm_ops = vma->vm_ops;
41578+ pax_open_kernel();
41579 qxl_ttm_vm_ops = *ttm_vm_ops;
41580 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41581+ pax_close_kernel();
41582 }
41583 vma->vm_ops = &qxl_ttm_vm_ops;
41584 return 0;
41585@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41586 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41587 {
41588 #if defined(CONFIG_DEBUG_FS)
41589- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41590- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41591- unsigned i;
41592+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41593+ {
41594+ .name = "qxl_mem_mm",
41595+ .show = &qxl_mm_dump_table,
41596+ },
41597+ {
41598+ .name = "qxl_surf_mm",
41599+ .show = &qxl_mm_dump_table,
41600+ }
41601+ };
41602
41603- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41604- if (i == 0)
41605- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41606- else
41607- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41608- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41609- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41610- qxl_mem_types_list[i].driver_features = 0;
41611- if (i == 0)
41612- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41613- else
41614- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41615+ pax_open_kernel();
41616+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41617+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41618+ pax_close_kernel();
41619
41620- }
41621- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41622+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41623 #else
41624 return 0;
41625 #endif
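qxl_ttm_vm_ops becomes a __read_only object of the non-constified mirror type, so the one-time fixup of its .fault hook must be bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, roughly by toggling CR0.WP) and compile to no-ops without PaX; the debugfs table is likewise pre-initialized at build time so only the two .data pointers need a bracketed runtime write. A sketch of the idiom under those assumptions, with hypothetical names:

    static vm_operations_struct_no_const my_vm_ops __read_only;

    static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

    static void install_fault_hook(const struct vm_operations_struct *base)
    {
        pax_open_kernel();        /* __read_only data now writable */
        my_vm_ops = *base;        /* one-time copy of the template  */
        my_vm_ops.fault = &my_fault;
        pax_close_kernel();       /* write protection restored      */
    }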
41626diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41627index 2c45ac9..5d740f8 100644
41628--- a/drivers/gpu/drm/r128/r128_cce.c
41629+++ b/drivers/gpu/drm/r128/r128_cce.c
41630@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41631
41632 /* GH: Simple idle check.
41633 */
41634- atomic_set(&dev_priv->idle_count, 0);
41635+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41636
41637 /* We don't support anything other than bus-mastering ring mode,
41638 * but the ring can be in either AGP or PCI space for the ring
41639diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41640index 723e5d6..102dbaf 100644
41641--- a/drivers/gpu/drm/r128/r128_drv.h
41642+++ b/drivers/gpu/drm/r128/r128_drv.h
41643@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41644 int is_pci;
41645 unsigned long cce_buffers_offset;
41646
41647- atomic_t idle_count;
41648+ atomic_unchecked_t idle_count;
41649
41650 int page_flipping;
41651 int current_page;
41652 u32 crtc_offset;
41653 u32 crtc_offset_cntl;
41654
41655- atomic_t vbl_received;
41656+ atomic_unchecked_t vbl_received;
41657
41658 u32 color_fmt;
41659 unsigned int front_offset;
41660diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41661index 663f38c..ec159a1 100644
41662--- a/drivers/gpu/drm/r128/r128_ioc32.c
41663+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41664@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41665 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41666 }
41667
41668-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41669+drm_ioctl_compat_t r128_compat_ioctls[] = {
41670 [DRM_R128_INIT] = compat_r128_init,
41671 [DRM_R128_DEPTH] = compat_r128_depth,
41672 [DRM_R128_STIPPLE] = compat_r128_stipple,
41673@@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41674 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41675 {
41676 unsigned int nr = DRM_IOCTL_NR(cmd);
41677- drm_ioctl_compat_t *fn = NULL;
41678 int ret;
41679
41680 if (nr < DRM_COMMAND_BASE)
41681 return drm_compat_ioctl(filp, cmd, arg);
41682
41683- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41684- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41685-
41686- if (fn != NULL)
41687- ret = (*fn) (filp, cmd, arg);
41688+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
41689+ ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41690 else
41691 ret = drm_ioctl(filp, cmd, arg);
41692
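Dropping the '*' from the table element type assumes drm_ioctl_compat_t is redefined elsewhere in this patch from a function type to a function-pointer type, so the array becomes an array of pointers that constification can place in .rodata; the dispatch is folded into one guarded call for the same reason. Roughly, as an assumption:

    typedef int (*drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
                                      unsigned long arg);

    static drm_ioctl_compat_t table[] = {
        compat_r128_init,   /* function names decay to pointers */
    };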
41693diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41694index c2ae496..30b5993 100644
41695--- a/drivers/gpu/drm/r128/r128_irq.c
41696+++ b/drivers/gpu/drm/r128/r128_irq.c
41697@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41698 if (crtc != 0)
41699 return 0;
41700
41701- return atomic_read(&dev_priv->vbl_received);
41702+ return atomic_read_unchecked(&dev_priv->vbl_received);
41703 }
41704
41705 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41706@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41707 /* VBLANK interrupt */
41708 if (status & R128_CRTC_VBLANK_INT) {
41709 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41710- atomic_inc(&dev_priv->vbl_received);
41711+ atomic_inc_unchecked(&dev_priv->vbl_received);
41712 drm_handle_vblank(dev, 0);
41713 return IRQ_HANDLED;
41714 }
41715diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41716index 8fd2d9f..18c9660 100644
41717--- a/drivers/gpu/drm/r128/r128_state.c
41718+++ b/drivers/gpu/drm/r128/r128_state.c
41719@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41720
41721 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41722 {
41723- if (atomic_read(&dev_priv->idle_count) == 0)
41724+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41725 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41726 else
41727- atomic_set(&dev_priv->idle_count, 0);
41728+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41729 }
41730
41731 #endif
41732diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41733index b928c17..e5d9400 100644
41734--- a/drivers/gpu/drm/radeon/mkregtable.c
41735+++ b/drivers/gpu/drm/radeon/mkregtable.c
41736@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41737 regex_t mask_rex;
41738 regmatch_t match[4];
41739 char buf[1024];
41740- size_t end;
41741+ long end;
41742 int len;
41743 int done = 0;
41744 int r;
41745 unsigned o;
41746 struct offset *offset;
41747 char last_reg_s[10];
41748- int last_reg;
41749+ unsigned long last_reg;
41750
41751 if (regcomp
41752 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41753diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41754index bd7519f..e1c2cd95 100644
41755--- a/drivers/gpu/drm/radeon/radeon_device.c
41756+++ b/drivers/gpu/drm/radeon/radeon_device.c
41757@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41758 * locking inversion with the driver load path. And the access here is
41759 * completely racy anyway. So don't bother with locking for now.
41760 */
41761- return dev->open_count == 0;
41762+ return local_read(&dev->open_count) == 0;
41763 }
41764
41765 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41766diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41767index 46bd393..6ae4719 100644
41768--- a/drivers/gpu/drm/radeon/radeon_drv.h
41769+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41770@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41771
41772 /* SW interrupt */
41773 wait_queue_head_t swi_queue;
41774- atomic_t swi_emitted;
41775+ atomic_unchecked_t swi_emitted;
41776 int vblank_crtc;
41777 uint32_t irq_enable_reg;
41778 uint32_t r500_disp_irq_reg;
41779diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41780index 0b98ea1..a3c770f 100644
41781--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41782+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41783@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41784 request = compat_alloc_user_space(sizeof(*request));
41785 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41786 || __put_user(req32.param, &request->param)
41787- || __put_user((void __user *)(unsigned long)req32.value,
41788+ || __put_user((unsigned long)req32.value,
41789 &request->value))
41790 return -EFAULT;
41791
41792@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41793 #define compat_radeon_cp_setparam NULL
41794 #endif /* X86_64 || IA64 */
41795
41796-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41797+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41798 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41799 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41800 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41801@@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41802 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41803 {
41804 unsigned int nr = DRM_IOCTL_NR(cmd);
41805- drm_ioctl_compat_t *fn = NULL;
41806 int ret;
41807
41808 if (nr < DRM_COMMAND_BASE)
41809 return drm_compat_ioctl(filp, cmd, arg);
41810
41811- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41812- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41813-
41814- if (fn != NULL)
41815- ret = (*fn) (filp, cmd, arg);
41816+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
41817+ ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41818 else
41819 ret = drm_ioctl(filp, cmd, arg);
41820
41821diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41822index 244b19b..c19226d 100644
41823--- a/drivers/gpu/drm/radeon/radeon_irq.c
41824+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41825@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41826 unsigned int ret;
41827 RING_LOCALS;
41828
41829- atomic_inc(&dev_priv->swi_emitted);
41830- ret = atomic_read(&dev_priv->swi_emitted);
41831+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41832+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41833
41834 BEGIN_RING(4);
41835 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41836@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41837 drm_radeon_private_t *dev_priv =
41838 (drm_radeon_private_t *) dev->dev_private;
41839
41840- atomic_set(&dev_priv->swi_emitted, 0);
41841+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41842 init_waitqueue_head(&dev_priv->swi_queue);
41843
41844 dev->max_vblank_count = 0x001fffff;
41845diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41846index 15aee72..cda326e 100644
41847--- a/drivers/gpu/drm/radeon/radeon_state.c
41848+++ b/drivers/gpu/drm/radeon/radeon_state.c
41849@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41850 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41851 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41852
41853- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41854+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41855 sarea_priv->nbox * sizeof(depth_boxes[0])))
41856 return -EFAULT;
41857
41858@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41859 {
41860 drm_radeon_private_t *dev_priv = dev->dev_private;
41861 drm_radeon_getparam_t *param = data;
41862- int value;
41863+ int value = 0;
41864
41865 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41866
41867diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41868index edafd3c..3af7c9c 100644
41869--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41870+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41871@@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41872 man->size = size >> PAGE_SHIFT;
41873 }
41874
41875-static struct vm_operations_struct radeon_ttm_vm_ops;
41876+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41877 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41878
41879 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41880@@ -1002,8 +1002,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41881 }
41882 if (unlikely(ttm_vm_ops == NULL)) {
41883 ttm_vm_ops = vma->vm_ops;
41884+ pax_open_kernel();
41885 radeon_ttm_vm_ops = *ttm_vm_ops;
41886 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41887+ pax_close_kernel();
41888 }
41889 vma->vm_ops = &radeon_ttm_vm_ops;
41890 return 0;
41891diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41892index 1a52522..8e78043 100644
41893--- a/drivers/gpu/drm/tegra/dc.c
41894+++ b/drivers/gpu/drm/tegra/dc.c
41895@@ -1585,7 +1585,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41896 }
41897
41898 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41899- dc->debugfs_files[i].data = dc;
41900+ *(void **)&dc->debugfs_files[i].data = dc;
41901
41902 err = drm_debugfs_create_files(dc->debugfs_files,
41903 ARRAY_SIZE(debugfs_files),
41904diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41905index ed970f6..4eeea42 100644
41906--- a/drivers/gpu/drm/tegra/dsi.c
41907+++ b/drivers/gpu/drm/tegra/dsi.c
41908@@ -62,7 +62,7 @@ struct tegra_dsi {
41909 struct clk *clk_lp;
41910 struct clk *clk;
41911
41912- struct drm_info_list *debugfs_files;
41913+ drm_info_list_no_const *debugfs_files;
41914 struct drm_minor *minor;
41915 struct dentry *debugfs;
41916
41917diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41918index 7eaaee74..cc2bc04 100644
41919--- a/drivers/gpu/drm/tegra/hdmi.c
41920+++ b/drivers/gpu/drm/tegra/hdmi.c
41921@@ -64,7 +64,7 @@ struct tegra_hdmi {
41922 bool stereo;
41923 bool dvi;
41924
41925- struct drm_info_list *debugfs_files;
41926+ drm_info_list_no_const *debugfs_files;
41927 struct drm_minor *minor;
41928 struct dentry *debugfs;
41929 };
41930diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41931index aa0bd054..aea6a01 100644
41932--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41933+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41934@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41935 }
41936
41937 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41938- ttm_bo_man_init,
41939- ttm_bo_man_takedown,
41940- ttm_bo_man_get_node,
41941- ttm_bo_man_put_node,
41942- ttm_bo_man_debug
41943+ .init = ttm_bo_man_init,
41944+ .takedown = ttm_bo_man_takedown,
41945+ .get_node = ttm_bo_man_get_node,
41946+ .put_node = ttm_bo_man_put_node,
41947+ .debug = ttm_bo_man_debug
41948 };
41949 EXPORT_SYMBOL(ttm_bo_manager_func);
41950diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41951index a1803fb..c53f6b0 100644
41952--- a/drivers/gpu/drm/ttm/ttm_memory.c
41953+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41954@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41955 zone->glob = glob;
41956 glob->zone_kernel = zone;
41957 ret = kobject_init_and_add(
41958- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41959+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41960 if (unlikely(ret != 0)) {
41961 kobject_put(&zone->kobj);
41962 return ret;
41963@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41964 zone->glob = glob;
41965 glob->zone_dma32 = zone;
41966 ret = kobject_init_and_add(
41967- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41968+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41969 if (unlikely(ret != 0)) {
41970 kobject_put(&zone->kobj);
41971 return ret;
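kobject_init_and_add() takes a printf-style format; passing zone->name directly would interpret any '%' sequence the name happened to contain. The "%s" indirection is the standard fix for this class of format-string bug:

    /* unsafe: the name doubles as the format string */
    kobject_init_and_add(&zone->kobj, &ttm_mem_zone_kobj_type,
                         &glob->kobj, zone->name);

    /* safe: the name is only ever data */
    kobject_init_and_add(&zone->kobj, &ttm_mem_zone_kobj_type,
                         &glob->kobj, "%s", zone->name);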
41972diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41973index 025c429..314062f 100644
41974--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41975+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41976@@ -54,7 +54,7 @@
41977
41978 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41979 #define SMALL_ALLOCATION 16
41980-#define FREE_ALL_PAGES (~0U)
41981+#define FREE_ALL_PAGES (~0UL)
41982 /* times are in msecs */
41983 #define PAGE_FREE_INTERVAL 1000
41984
41985@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41986 * @free_all: If set to true will free all pages in pool
41987 * @use_static: Safe to use static buffer
41988 **/
41989-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41990+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41991 bool use_static)
41992 {
41993 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41994 unsigned long irq_flags;
41995 struct page *p;
41996 struct page **pages_to_free;
41997- unsigned freed_pages = 0,
41998- npages_to_free = nr_free;
41999+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42000
42001 if (NUM_PAGES_TO_ALLOC < nr_free)
42002 npages_to_free = NUM_PAGES_TO_ALLOC;
42003@@ -371,7 +370,8 @@ restart:
42004 __list_del(&p->lru, &pool->list);
42005
42006 ttm_pool_update_free_locked(pool, freed_pages);
42007- nr_free -= freed_pages;
42008+ if (likely(nr_free != FREE_ALL_PAGES))
42009+ nr_free -= freed_pages;
42010 }
42011
42012 spin_unlock_irqrestore(&pool->lock, irq_flags);
42013@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42014 unsigned i;
42015 unsigned pool_offset;
42016 struct ttm_page_pool *pool;
42017- int shrink_pages = sc->nr_to_scan;
42018+ unsigned long shrink_pages = sc->nr_to_scan;
42019 unsigned long freed = 0;
42020
42021 if (!mutex_trylock(&lock))
42022@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42023 pool_offset = ++start_pool % NUM_POOLS;
42024 /* select start pool in round robin fashion */
42025 for (i = 0; i < NUM_POOLS; ++i) {
42026- unsigned nr_free = shrink_pages;
42027+ unsigned long nr_free = shrink_pages;
42028 if (shrink_pages == 0)
42029 break;
42030 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
42031@@ -673,7 +673,7 @@ out:
42032 }
42033
42034 /* Put all pages in pages list to correct pool to wait for reuse */
42035-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
42036+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
42037 enum ttm_caching_state cstate)
42038 {
42039 unsigned long irq_flags;
42040@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
42041 struct list_head plist;
42042 struct page *p = NULL;
42043 gfp_t gfp_flags = GFP_USER;
42044- unsigned count;
42045+ unsigned long count;
42046 int r;
42047
42048 /* set zero flag for page allocation if required */
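Widening FREE_ALL_PAGES to ~0UL matches the nr_free counters now being unsigned long, and the new guard keeps the sentinel intact: "free everything" minus a partial batch must not decay into a finite count (nor look like arithmetic overflow to the size_overflow plugin). A minimal model of the accounting step:

    #define FREE_ALL_PAGES (~0UL)

    static unsigned long account_freed(unsigned long nr_free,
                                       unsigned long freed_pages)
    {
        if (nr_free != FREE_ALL_PAGES)  /* preserve the sentinel */
            nr_free -= freed_pages;
        return nr_free;
    }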
42049diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42050index 01e1d27..aaa018a 100644
42051--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42052+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42053@@ -56,7 +56,7 @@
42054
42055 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42056 #define SMALL_ALLOCATION 4
42057-#define FREE_ALL_PAGES (~0U)
42058+#define FREE_ALL_PAGES (~0UL)
42059 /* times are in msecs */
42060 #define IS_UNDEFINED (0)
42061 #define IS_WC (1<<1)
42062@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
42063 * @nr_free: If set to true will free all pages in pool
42064 * @use_static: Safe to use static buffer
42065 **/
42066-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42067+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
42068 bool use_static)
42069 {
42070 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42071@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42072 struct dma_page *dma_p, *tmp;
42073 struct page **pages_to_free;
42074 struct list_head d_pages;
42075- unsigned freed_pages = 0,
42076- npages_to_free = nr_free;
42077+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42078
42079 if (NUM_PAGES_TO_ALLOC < nr_free)
42080 npages_to_free = NUM_PAGES_TO_ALLOC;
42081@@ -499,7 +498,8 @@ restart:
42082 /* remove range of pages from the pool */
42083 if (freed_pages) {
42084 ttm_pool_update_free_locked(pool, freed_pages);
42085- nr_free -= freed_pages;
42086+ if (likely(nr_free != FREE_ALL_PAGES))
42087+ nr_free -= freed_pages;
42088 }
42089
42090 spin_unlock_irqrestore(&pool->lock, irq_flags);
42091@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
42092 struct dma_page *d_page, *next;
42093 enum pool_type type;
42094 bool is_cached = false;
42095- unsigned count = 0, i, npages = 0;
42096+ unsigned long count = 0, i, npages = 0;
42097 unsigned long irq_flags;
42098
42099 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
42100@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42101 static unsigned start_pool;
42102 unsigned idx = 0;
42103 unsigned pool_offset;
42104- unsigned shrink_pages = sc->nr_to_scan;
42105+ unsigned long shrink_pages = sc->nr_to_scan;
42106 struct device_pools *p;
42107 unsigned long freed = 0;
42108
42109@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42110 goto out;
42111 pool_offset = ++start_pool % _manager->npools;
42112 list_for_each_entry(p, &_manager->pools, pools) {
42113- unsigned nr_free;
42114+ unsigned long nr_free;
42115
42116 if (!p->dev)
42117 continue;
42118@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42119 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
42120 freed += nr_free - shrink_pages;
42121
42122- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
42123+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
42124 p->pool->dev_name, p->pool->name, current->pid,
42125 nr_free, shrink_pages);
42126 }
42127diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42128index 5fc16ce..1bd84ec 100644
42129--- a/drivers/gpu/drm/udl/udl_fb.c
42130+++ b/drivers/gpu/drm/udl/udl_fb.c
42131@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42132 fb_deferred_io_cleanup(info);
42133 kfree(info->fbdefio);
42134 info->fbdefio = NULL;
42135- info->fbops->fb_mmap = udl_fb_mmap;
42136 }
42137
42138 pr_warn("released /dev/fb%d user=%d count=%d\n",
42139diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42140index ef8c500..01030c8 100644
42141--- a/drivers/gpu/drm/via/via_drv.h
42142+++ b/drivers/gpu/drm/via/via_drv.h
42143@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
42144 typedef uint32_t maskarray_t[5];
42145
42146 typedef struct drm_via_irq {
42147- atomic_t irq_received;
42148+ atomic_unchecked_t irq_received;
42149 uint32_t pending_mask;
42150 uint32_t enable_mask;
42151 wait_queue_head_t irq_queue;
42152@@ -77,7 +77,7 @@ typedef struct drm_via_private {
42153 struct timeval last_vblank;
42154 int last_vblank_valid;
42155 unsigned usec_per_vblank;
42156- atomic_t vbl_received;
42157+ atomic_unchecked_t vbl_received;
42158 drm_via_state_t hc_state;
42159 char pci_buf[VIA_PCI_BUF_SIZE];
42160 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
42161diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
42162index 1319433..a993b0c 100644
42163--- a/drivers/gpu/drm/via/via_irq.c
42164+++ b/drivers/gpu/drm/via/via_irq.c
42165@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
42166 if (crtc != 0)
42167 return 0;
42168
42169- return atomic_read(&dev_priv->vbl_received);
42170+ return atomic_read_unchecked(&dev_priv->vbl_received);
42171 }
42172
42173 irqreturn_t via_driver_irq_handler(int irq, void *arg)
42174@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42175
42176 status = VIA_READ(VIA_REG_INTERRUPT);
42177 if (status & VIA_IRQ_VBLANK_PENDING) {
42178- atomic_inc(&dev_priv->vbl_received);
42179- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
42180+ atomic_inc_unchecked(&dev_priv->vbl_received);
42181+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
42182 do_gettimeofday(&cur_vblank);
42183 if (dev_priv->last_vblank_valid) {
42184 dev_priv->usec_per_vblank =
42185@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42186 dev_priv->last_vblank = cur_vblank;
42187 dev_priv->last_vblank_valid = 1;
42188 }
42189- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
42190+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
42191 DRM_DEBUG("US per vblank is: %u\n",
42192 dev_priv->usec_per_vblank);
42193 }
42194@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42195
42196 for (i = 0; i < dev_priv->num_irqs; ++i) {
42197 if (status & cur_irq->pending_mask) {
42198- atomic_inc(&cur_irq->irq_received);
42199+ atomic_inc_unchecked(&cur_irq->irq_received);
42200 wake_up(&cur_irq->irq_queue);
42201 handled = 1;
42202 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
42203@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
42204 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42205 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
42206 masks[irq][4]));
42207- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
42208+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
42209 } else {
42210 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42211 (((cur_irq_sequence =
42212- atomic_read(&cur_irq->irq_received)) -
42213+ atomic_read_unchecked(&cur_irq->irq_received)) -
42214 *sequence) <= (1 << 23)));
42215 }
42216 *sequence = cur_irq_sequence;
42217@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
42218 }
42219
42220 for (i = 0; i < dev_priv->num_irqs; ++i) {
42221- atomic_set(&cur_irq->irq_received, 0);
42222+ atomic_set_unchecked(&cur_irq->irq_received, 0);
42223 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
42224 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
42225 init_waitqueue_head(&cur_irq->irq_queue);
42226@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
42227 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
42228 case VIA_IRQ_RELATIVE:
42229 irqwait->request.sequence +=
42230- atomic_read(&cur_irq->irq_received);
42231+ atomic_read_unchecked(&cur_irq->irq_received);
42232 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
42233 case VIA_IRQ_ABSOLUTE:
42234 break;
42235diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42236index d26a6da..5fa41ed 100644
42237--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42238+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42239@@ -447,7 +447,7 @@ struct vmw_private {
42240 * Fencing and IRQs.
42241 */
42242
42243- atomic_t marker_seq;
42244+ atomic_unchecked_t marker_seq;
42245 wait_queue_head_t fence_queue;
42246 wait_queue_head_t fifo_queue;
42247 spinlock_t waiter_lock;
42248diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42249index 39f2b03..d1b0a64 100644
42250--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42251+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42252@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
42253 (unsigned int) min,
42254 (unsigned int) fifo->capabilities);
42255
42256- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42257+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42258 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
42259 vmw_marker_queue_init(&fifo->marker_queue);
42260 return vmw_fifo_send_fence(dev_priv, &dummy);
42261@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
42262 if (reserveable)
42263 iowrite32(bytes, fifo_mem +
42264 SVGA_FIFO_RESERVED);
42265- return fifo_mem + (next_cmd >> 2);
42266+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
42267 } else {
42268 need_bounce = true;
42269 }
42270@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42271
42272 fm = vmw_fifo_reserve(dev_priv, bytes);
42273 if (unlikely(fm == NULL)) {
42274- *seqno = atomic_read(&dev_priv->marker_seq);
42275+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42276 ret = -ENOMEM;
42277 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
42278 false, 3*HZ);
42279@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42280 }
42281
42282 do {
42283- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
42284+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
42285 } while (*seqno == 0);
42286
42287 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
42288diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42289index 170b61b..fec7348 100644
42290--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42291+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42292@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
42293 }
42294
42295 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
42296- vmw_gmrid_man_init,
42297- vmw_gmrid_man_takedown,
42298- vmw_gmrid_man_get_node,
42299- vmw_gmrid_man_put_node,
42300- vmw_gmrid_man_debug
42301+ .init = vmw_gmrid_man_init,
42302+ .takedown = vmw_gmrid_man_takedown,
42303+ .get_node = vmw_gmrid_man_get_node,
42304+ .put_node = vmw_gmrid_man_put_node,
42305+ .debug = vmw_gmrid_man_debug
42306 };
42307diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42308index 69c8ce2..cacb0ab 100644
42309--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42310+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42311@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
42312 int ret;
42313
42314 num_clips = arg->num_clips;
42315- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42316+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42317
42318 if (unlikely(num_clips == 0))
42319 return 0;
42320@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
42321 int ret;
42322
42323 num_clips = arg->num_clips;
42324- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42325+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42326
42327 if (unlikely(num_clips == 0))
42328 return 0;
42329diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42330index 9fe9827..0aa2fc0 100644
42331--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42332+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42333@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42334 * emitted. Then the fence is stale and signaled.
42335 */
42336
42337- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42338+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42339 > VMW_FENCE_WRAP);
42340
42341 return ret;
42342@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42343
42344 if (fifo_idle)
42345 down_read(&fifo_state->rwsem);
42346- signal_seq = atomic_read(&dev_priv->marker_seq);
42347+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42348 ret = 0;
42349
42350 for (;;) {
42351diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42352index efd1ffd..0ae13ca 100644
42353--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42354+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42355@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42356 while (!vmw_lag_lt(queue, us)) {
42357 spin_lock(&queue->lock);
42358 if (list_empty(&queue->head))
42359- seqno = atomic_read(&dev_priv->marker_seq);
42360+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42361 else {
42362 marker = list_first_entry(&queue->head,
42363 struct vmw_marker, head);
42364diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42365index 37ac7b5..d52a5c9 100644
42366--- a/drivers/gpu/vga/vga_switcheroo.c
42367+++ b/drivers/gpu/vga/vga_switcheroo.c
42368@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42369
42370 /* this version is for the case where the power switch is separate
42371 to the device being powered down. */
42372-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42373+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42374 {
42375 /* copy over all the bus versions */
42376 if (dev->bus && dev->bus->pm) {
42377@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42378 return ret;
42379 }
42380
42381-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42382+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42383 {
42384 /* copy over all the bus versions */
42385 if (dev->bus && dev->bus->pm) {
42386diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42387index 56ce8c2..32ce524 100644
42388--- a/drivers/hid/hid-core.c
42389+++ b/drivers/hid/hid-core.c
42390@@ -2531,7 +2531,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42391
42392 int hid_add_device(struct hid_device *hdev)
42393 {
42394- static atomic_t id = ATOMIC_INIT(0);
42395+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42396 int ret;
42397
42398 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42399@@ -2574,7 +2574,7 @@ int hid_add_device(struct hid_device *hdev)
42400 /* XXX hack, any other cleaner solution after the driver core
42401 * is converted to allow more than 20 bytes as the device name? */
42402 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42403- hdev->vendor, hdev->product, atomic_inc_return(&id));
42404+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42405
42406 hid_debug_register(hdev, dev_name(&hdev->dev));
42407 ret = device_add(&hdev->dev);
42408diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42409index c13fb5b..55a3802 100644
42410--- a/drivers/hid/hid-wiimote-debug.c
42411+++ b/drivers/hid/hid-wiimote-debug.c
42412@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42413 else if (size == 0)
42414 return -EIO;
42415
42416- if (copy_to_user(u, buf, size))
42417+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42418 return -EFAULT;
42419
42420 *off += size;
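The added size > sizeof(buf) test makes the stack buffer the hard upper bound for the copy, so a miscomputed or attacker-influenced size can no longer read past buf. The pattern in isolation:

    char buf[16];
    /* 'size' is derived from device state; treat it as untrusted */
    if (size > sizeof(buf) || copy_to_user(u, buf, size))
        return -EFAULT;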
42421diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42422index 00bc30e..d8e5097 100644
42423--- a/drivers/hv/channel.c
42424+++ b/drivers/hv/channel.c
42425@@ -370,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42426 int ret = 0;
42427
42428 next_gpadl_handle =
42429- (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
42430+ (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
42431
42432 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42433 if (ret)
42434diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42435index 50e51a5..b0bfd78 100644
42436--- a/drivers/hv/hv.c
42437+++ b/drivers/hv/hv.c
42438@@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42439 u64 output_address = (output) ? virt_to_phys(output) : 0;
42440 u32 output_address_hi = output_address >> 32;
42441 u32 output_address_lo = output_address & 0xFFFFFFFF;
42442- void *hypercall_page = hv_context.hypercall_page;
42443+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42444
42445 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42446 "=a"(hv_status_lo) : "d" (control_hi),
42447@@ -164,7 +164,7 @@ int hv_init(void)
42448 /* See if the hypercall page is already set */
42449 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42450
42451- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42452+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42453
42454 if (!virtaddr)
42455 goto cleanup;
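Two KERNEXEC-related changes here: the hypercall page is mapped read-only-executable (PAGE_KERNEL_RX) rather than writable-executable, upholding W^X, and the indirect call goes through ktva_ktla(), which this patch is assumed to define for translating between the kernel text alias and the linear mapping. A sketch of the allocation side only:

    #include <linux/vmalloc.h>

    static void *alloc_hypercall_page(void)
    {
        /* RX: never writable and executable at the same time */
        return __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
    }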
42456diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42457index ff16938..e60879c 100644
42458--- a/drivers/hv/hv_balloon.c
42459+++ b/drivers/hv/hv_balloon.c
42460@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42461
42462 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42463 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42464-static atomic_t trans_id = ATOMIC_INIT(0);
42465+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42466
42467 static int dm_ring_size = (5 * PAGE_SIZE);
42468
42469@@ -947,7 +947,7 @@ static void hot_add_req(struct work_struct *dummy)
42470 pr_info("Memory hot add failed\n");
42471
42472 dm->state = DM_INITIALIZED;
42473- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42474+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42475 vmbus_sendpacket(dm->dev->channel, &resp,
42476 sizeof(struct dm_hot_add_response),
42477 (unsigned long)NULL,
42478@@ -1028,7 +1028,7 @@ static void post_status(struct hv_dynmem_device *dm)
42479 memset(&status, 0, sizeof(struct dm_status));
42480 status.hdr.type = DM_STATUS_REPORT;
42481 status.hdr.size = sizeof(struct dm_status);
42482- status.hdr.trans_id = atomic_inc_return(&trans_id);
42483+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42484
42485 /*
42486 * The host expects the guest to report free memory.
42487@@ -1048,7 +1048,7 @@ static void post_status(struct hv_dynmem_device *dm)
42488 * send the status. This can happen if we were interrupted
42489 * after we picked our transaction ID.
42490 */
42491- if (status.hdr.trans_id != atomic_read(&trans_id))
42492+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42493 return;
42494
42495 /*
42496@@ -1188,7 +1188,7 @@ static void balloon_up(struct work_struct *dummy)
42497 */
42498
42499 do {
42500- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42501+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42502 ret = vmbus_sendpacket(dm_device.dev->channel,
42503 bl_resp,
42504 bl_resp->hdr.size,
42505@@ -1234,7 +1234,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42506
42507 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42508 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42509- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42510+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42511 resp.hdr.size = sizeof(struct dm_unballoon_response);
42512
42513 vmbus_sendpacket(dm_device.dev->channel, &resp,
42514@@ -1295,7 +1295,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42515 memset(&version_req, 0, sizeof(struct dm_version_request));
42516 version_req.hdr.type = DM_VERSION_REQUEST;
42517 version_req.hdr.size = sizeof(struct dm_version_request);
42518- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42519+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42520 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42521 version_req.is_last_attempt = 1;
42522
42523@@ -1468,7 +1468,7 @@ static int balloon_probe(struct hv_device *dev,
42524 memset(&version_req, 0, sizeof(struct dm_version_request));
42525 version_req.hdr.type = DM_VERSION_REQUEST;
42526 version_req.hdr.size = sizeof(struct dm_version_request);
42527- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42528+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42529 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42530 version_req.is_last_attempt = 0;
42531
42532@@ -1499,7 +1499,7 @@ static int balloon_probe(struct hv_device *dev,
42533 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42534 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42535 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42536- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42537+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42538
42539 cap_msg.caps.cap_bits.balloon = 1;
42540 cap_msg.caps.cap_bits.hot_add = 1;
42541diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42542index 44b1c94..6dccc2c 100644
42543--- a/drivers/hv/hyperv_vmbus.h
42544+++ b/drivers/hv/hyperv_vmbus.h
42545@@ -632,7 +632,7 @@ enum vmbus_connect_state {
42546 struct vmbus_connection {
42547 enum vmbus_connect_state conn_state;
42548
42549- atomic_t next_gpadl_handle;
42550+ atomic_unchecked_t next_gpadl_handle;
42551
42552 /*
42553 * Represents channel interrupts. Each bit position represents a
42554diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42555index f518b8d7..4bc0b64 100644
42556--- a/drivers/hv/vmbus_drv.c
42557+++ b/drivers/hv/vmbus_drv.c
42558@@ -840,10 +840,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42559 {
42560 int ret = 0;
42561
42562- static atomic_t device_num = ATOMIC_INIT(0);
42563+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42564
42565 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42566- atomic_inc_return(&device_num));
42567+ atomic_inc_return_unchecked(&device_num));
42568
42569 child_device_obj->device.bus = &hv_bus;
42570 child_device_obj->device.parent = &hv_acpi_dev->dev;
42571diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42572index 579bdf9..0dac21d5 100644
42573--- a/drivers/hwmon/acpi_power_meter.c
42574+++ b/drivers/hwmon/acpi_power_meter.c
42575@@ -116,7 +116,7 @@ struct sensor_template {
42576 struct device_attribute *devattr,
42577 const char *buf, size_t count);
42578 int index;
42579-};
42580+} __do_const;
42581
42582 /* Averaging interval */
42583 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42584@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42585 struct sensor_template *attrs)
42586 {
42587 struct device *dev = &resource->acpi_dev->dev;
42588- struct sensor_device_attribute *sensors =
42589+ sensor_device_attribute_no_const *sensors =
42590 &resource->sensors[resource->num_sensors];
42591 int res = 0;
42592
42593@@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
42594 return 0;
42595 }
42596
42597-static struct dmi_system_id __initdata pm_dmi_table[] = {
42598+static const struct dmi_system_id __initconst pm_dmi_table[] = {
42599 {
42600 enable_cap_knobs, "IBM Active Energy Manager",
42601 {
42602diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42603index 0af63da..05a183a 100644
42604--- a/drivers/hwmon/applesmc.c
42605+++ b/drivers/hwmon/applesmc.c
42606@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42607 {
42608 struct applesmc_node_group *grp;
42609 struct applesmc_dev_attr *node;
42610- struct attribute *attr;
42611+ attribute_no_const *attr;
42612 int ret, i;
42613
42614 for (grp = groups; grp->format; grp++) {
42615diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42616index cccef87..06ce8ec 100644
42617--- a/drivers/hwmon/asus_atk0110.c
42618+++ b/drivers/hwmon/asus_atk0110.c
42619@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42620 struct atk_sensor_data {
42621 struct list_head list;
42622 struct atk_data *data;
42623- struct device_attribute label_attr;
42624- struct device_attribute input_attr;
42625- struct device_attribute limit1_attr;
42626- struct device_attribute limit2_attr;
42627+ device_attribute_no_const label_attr;
42628+ device_attribute_no_const input_attr;
42629+ device_attribute_no_const limit1_attr;
42630+ device_attribute_no_const limit2_attr;
42631 char label_attr_name[ATTR_NAME_SIZE];
42632 char input_attr_name[ATTR_NAME_SIZE];
42633 char limit1_attr_name[ATTR_NAME_SIZE];
42634@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42635 static struct device_attribute atk_name_attr =
42636 __ATTR(name, 0444, atk_name_show, NULL);
42637
42638-static void atk_init_attribute(struct device_attribute *attr, char *name,
42639+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42640 sysfs_show_func show)
42641 {
42642 sysfs_attr_init(&attr->attr);
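The many *_no_const types in these hwmon hunks (device_attribute_no_const, sensor_device_attribute_no_const, attribute_group_no_const, and so on) follow one pattern: the struct type itself is auto-constified by the plugin, and a layout-identical mirror typedef marked __no_const is provided for the few instances that genuinely must be filled in at runtime, such as attributes whose names are built per sensor. Assumed shape of such a typedef:

    typedef struct device_attribute __no_const device_attribute_no_const;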
42643diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42644index 5b7fec8..05c957a 100644
42645--- a/drivers/hwmon/coretemp.c
42646+++ b/drivers/hwmon/coretemp.c
42647@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42648 return NOTIFY_OK;
42649 }
42650
42651-static struct notifier_block coretemp_cpu_notifier __refdata = {
42652+static struct notifier_block coretemp_cpu_notifier = {
42653 .notifier_call = coretemp_cpu_callback,
42654 };
42655
42656diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42657index 7a8a6fb..015c1fd 100644
42658--- a/drivers/hwmon/ibmaem.c
42659+++ b/drivers/hwmon/ibmaem.c
42660@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42661 struct aem_rw_sensor_template *rw)
42662 {
42663 struct device *dev = &data->pdev->dev;
42664- struct sensor_device_attribute *sensors = data->sensors;
42665+ sensor_device_attribute_no_const *sensors = data->sensors;
42666 int err;
42667
42668 /* Set up read-only sensors */
42669diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42670index 17ae2eb..21b71dd 100644
42671--- a/drivers/hwmon/iio_hwmon.c
42672+++ b/drivers/hwmon/iio_hwmon.c
42673@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42674 {
42675 struct device *dev = &pdev->dev;
42676 struct iio_hwmon_state *st;
42677- struct sensor_device_attribute *a;
42678+ sensor_device_attribute_no_const *a;
42679 int ret, i;
42680 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42681 enum iio_chan_type type;
42682diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42683index f3830db..9f4d6d5 100644
42684--- a/drivers/hwmon/nct6683.c
42685+++ b/drivers/hwmon/nct6683.c
42686@@ -397,11 +397,11 @@ static struct attribute_group *
42687 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42688 int repeat)
42689 {
42690- struct sensor_device_attribute_2 *a2;
42691- struct sensor_device_attribute *a;
42692+ sensor_device_attribute_2_no_const *a2;
42693+ sensor_device_attribute_no_const *a;
42694 struct sensor_device_template **t;
42695 struct sensor_device_attr_u *su;
42696- struct attribute_group *group;
42697+ attribute_group_no_const *group;
42698 struct attribute **attrs;
42699 int i, j, count;
42700
42701diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42702index 1be4117..88ae1e1 100644
42703--- a/drivers/hwmon/nct6775.c
42704+++ b/drivers/hwmon/nct6775.c
42705@@ -952,10 +952,10 @@ static struct attribute_group *
42706 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42707 int repeat)
42708 {
42709- struct attribute_group *group;
42710+ attribute_group_no_const *group;
42711 struct sensor_device_attr_u *su;
42712- struct sensor_device_attribute *a;
42713- struct sensor_device_attribute_2 *a2;
42714+ sensor_device_attribute_no_const *a;
42715+ sensor_device_attribute_2_no_const *a2;
42716 struct attribute **attrs;
42717 struct sensor_device_template **t;
42718 int i, count;
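
nct6683 and nct6775 assemble their sysfs attribute groups from templates at probe time, so the group, the attribute array, and each attribute must stay writable; hence attribute_group_no_const and sensor_device_attribute_no_const instead of the constified structs. The shape of that pattern, as a sketch (build_group and its internals are hypothetical, not the drivers' actual code):

	#include <linux/device.h>
	#include <linux/slab.h>
	#include <linux/sysfs.h>

	static struct attribute_group *build_group(struct device *dev, int n)
	{
		struct attribute_group *group; /* _no_const under the plugin */
		struct attribute **attrs;

		group = devm_kzalloc(dev, sizeof(*group), GFP_KERNEL);
		attrs = devm_kcalloc(dev, n + 1, sizeof(*attrs), GFP_KERNEL);
		if (!group || !attrs)
			return NULL;

		/* ... fill attrs[0..n-1] from the driver's templates ... */
		group->attrs = attrs; /* this store needs a writable group */
		return group;
	}
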
42719diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42720index f2e47c7..45d7941 100644
42721--- a/drivers/hwmon/pmbus/pmbus_core.c
42722+++ b/drivers/hwmon/pmbus/pmbus_core.c
42723@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42724 return 0;
42725 }
42726
42727-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42728+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42729 const char *name,
42730 umode_t mode,
42731 ssize_t (*show)(struct device *dev,
42732@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42733 dev_attr->store = store;
42734 }
42735
42736-static void pmbus_attr_init(struct sensor_device_attribute *a,
42737+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42738 const char *name,
42739 umode_t mode,
42740 ssize_t (*show)(struct device *dev,
42741@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42742 u16 reg, u8 mask)
42743 {
42744 struct pmbus_boolean *boolean;
42745- struct sensor_device_attribute *a;
42746+ sensor_device_attribute_no_const *a;
42747
42748 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42749 if (!boolean)
42750@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42751 bool update, bool readonly)
42752 {
42753 struct pmbus_sensor *sensor;
42754- struct device_attribute *a;
42755+ device_attribute_no_const *a;
42756
42757 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42758 if (!sensor)
42759@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42760 const char *lstring, int index)
42761 {
42762 struct pmbus_label *label;
42763- struct device_attribute *a;
42764+ device_attribute_no_const *a;
42765
42766 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42767 if (!label)
42768diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42769index d4f0935..7420593 100644
42770--- a/drivers/hwmon/sht15.c
42771+++ b/drivers/hwmon/sht15.c
42772@@ -169,7 +169,7 @@ struct sht15_data {
42773 int supply_uv;
42774 bool supply_uv_valid;
42775 struct work_struct update_supply_work;
42776- atomic_t interrupt_handled;
42777+ atomic_unchecked_t interrupt_handled;
42778 };
42779
42780 /**
42781@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42782 ret = gpio_direction_input(data->pdata->gpio_data);
42783 if (ret)
42784 return ret;
42785- atomic_set(&data->interrupt_handled, 0);
42786+ atomic_set_unchecked(&data->interrupt_handled, 0);
42787
42788 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42789 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42790 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42791 /* Only relevant if the interrupt hasn't occurred. */
42792- if (!atomic_read(&data->interrupt_handled))
42793+ if (!atomic_read_unchecked(&data->interrupt_handled))
42794 schedule_work(&data->read_work);
42795 }
42796 ret = wait_event_timeout(data->wait_queue,
42797@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42798
42799 /* First disable the interrupt */
42800 disable_irq_nosync(irq);
42801- atomic_inc(&data->interrupt_handled);
42802+ atomic_inc_unchecked(&data->interrupt_handled);
42803 /* Then schedule a reading work struct */
42804 if (data->state != SHT15_READING_NOTHING)
42805 schedule_work(&data->read_work);
42806@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42807 * If not, then start the interrupt again - care here as could
42808 * have gone low in meantime so verify it hasn't!
42809 */
42810- atomic_set(&data->interrupt_handled, 0);
42811+ atomic_set_unchecked(&data->interrupt_handled, 0);
42812 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42813 /* If still not occurred or another handler was scheduled */
42814 if (gpio_get_value(data->pdata->gpio_data)
42815- || atomic_read(&data->interrupt_handled))
42816+ || atomic_read_unchecked(&data->interrupt_handled))
42817 return;
42818 }
42819
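
sht15's interrupt_handled is a handshake flag between the IRQ handler and the read work, not a reference count, so it moves to atomic_unchecked_t. Under PaX's REFCOUNT hardening the plain atomic_t operations detect signed overflow (to catch refcount overflows before they become use-after-free bugs); the _unchecked family keeps ordinary wrapping semantics for counters where overflow is harmless. A conceptual model of the split -- not the real implementation, which uses architecture-specific atomic instructions:

	typedef struct { int counter; } atomic_t;           /* checked  */
	typedef struct { int counter; } atomic_unchecked_t; /* wrapping */

	static inline void atomic_inc(atomic_t *v)
	{
		/* PaX instruments the real atomic op; conceptually: */
		if (__builtin_add_overflow(v->counter, 1, &v->counter))
			__builtin_trap(); /* report and contain overflow */
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	}
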
42820diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42821index ac91c07..8e69663 100644
42822--- a/drivers/hwmon/via-cputemp.c
42823+++ b/drivers/hwmon/via-cputemp.c
42824@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42825 return NOTIFY_OK;
42826 }
42827
42828-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42829+static struct notifier_block via_cputemp_cpu_notifier = {
42830 .notifier_call = via_cputemp_cpu_callback,
42831 };
42832
42833diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42834index 65e3240..e6c511d 100644
42835--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42836+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42837@@ -39,7 +39,7 @@
42838 extern struct i2c_adapter amd756_smbus;
42839
42840 static struct i2c_adapter *s4882_adapter;
42841-static struct i2c_algorithm *s4882_algo;
42842+static i2c_algorithm_no_const *s4882_algo;
42843
42844 /* Wrapper access functions for multiplexed SMBus */
42845 static DEFINE_MUTEX(amd756_lock);
42846diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42847index b19a310..d6eece0 100644
42848--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42849+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42850@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42851 /* usb layer */
42852
42853 /* Send command to device, and get response. */
42854-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42855+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42856 {
42857 int ret = 0;
42858 int actual;
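
__intentional_overflow(-1) is consumed by grsecurity's size_overflow GCC plugin, which instruments integer expressions feeding allocation sizes and copy lengths; the annotation exempts values that may wrap by design, with -1 conventionally denoting the function's return value. Without the plugin the macro should compile away. Approximately, plus a hypothetical example of arithmetic that wraps on purpose:

	#ifdef SIZE_OVERFLOW_PLUGIN
	# define __intentional_overflow(...) \
		__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	# define __intentional_overflow(...)
	#endif

	/* Ring-buffer space calculation: modular by intent. */
	static int __intentional_overflow(-1)
	ring_space(unsigned int head, unsigned int tail, unsigned int size)
	{
		return (int)((tail - head) & (size - 1));
	}
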
42859diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42860index 88eda09..cf40434 100644
42861--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42862+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42863@@ -37,7 +37,7 @@
42864 extern struct i2c_adapter *nforce2_smbus;
42865
42866 static struct i2c_adapter *s4985_adapter;
42867-static struct i2c_algorithm *s4985_algo;
42868+static i2c_algorithm_no_const *s4985_algo;
42869
42870 /* Wrapper access functions for multiplexed SMBus */
42871 static DEFINE_MUTEX(nforce2_lock);
42872diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42873index 71c7a39..71dd3e0 100644
42874--- a/drivers/i2c/i2c-dev.c
42875+++ b/drivers/i2c/i2c-dev.c
42876@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42877 break;
42878 }
42879
42880- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42881+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42882 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42883 if (IS_ERR(rdwr_pa[i].buf)) {
42884 res = PTR_ERR(rdwr_pa[i].buf);
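
rdwr_pa[i].buf holds a user-supplied address that the ioctl handler is about to memdup_user(); the cast becomes __force_user so that, under PaX's user/kernel address-space separation, the deliberate reinterpretation of a plain pointer as a __user pointer is visible to sparse and the compiler plugins rather than silently discarded. In grsecurity __force_user is simply __force __user; the sparse side looks roughly like:

	#ifdef __CHECKER__
	# define __user  __attribute__((noderef, address_space(1)))
	# define __force __attribute__((force))
	#else
	# define __user
	# define __force
	#endif
	#define __force_user __force __user

	/* Deliberately reinterpret a stored user address: */
	static inline void __user *to_user_ptr(unsigned long addr)
	{
		return (void __force_user *)addr;
	}
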
42885diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42886index 0b510ba..4fbb5085 100644
42887--- a/drivers/ide/ide-cd.c
42888+++ b/drivers/ide/ide-cd.c
42889@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42890 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42891 if ((unsigned long)buf & alignment
42892 || blk_rq_bytes(rq) & q->dma_pad_mask
42893- || object_is_on_stack(buf))
42894+ || object_starts_on_stack(buf))
42895 drive->dma = 0;
42896 }
42897 }
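
ide-cd falls back to PIO when the request buffer cannot safely be DMAed, and one such case is a buffer on the kernel stack. Mainline's object_is_on_stack() already tests only the object's start address; grsecurity renames it to object_starts_on_stack() to make that explicit (with its stack hardening, the start address is the reliable thing to test). The check is essentially:

	#include <linux/sched.h>
	#include <linux/thread_info.h>

	/* Does the first byte of obj lie within current's stack? */
	static inline int object_starts_on_stack(const void *obj)
	{
		const void *stack = task_stack_page(current);

		return obj >= stack && obj < stack + THREAD_SIZE;
	}
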
42898diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42899index 4df97f6..c751151 100644
42900--- a/drivers/iio/industrialio-core.c
42901+++ b/drivers/iio/industrialio-core.c
42902@@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42903 }
42904
42905 static
42906-int __iio_device_attr_init(struct device_attribute *dev_attr,
42907+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42908 const char *postfix,
42909 struct iio_chan_spec const *chan,
42910 ssize_t (*readfunc)(struct device *dev,
42911diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42912index e28a494..f7c2671 100644
42913--- a/drivers/infiniband/core/cm.c
42914+++ b/drivers/infiniband/core/cm.c
42915@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42916
42917 struct cm_counter_group {
42918 struct kobject obj;
42919- atomic_long_t counter[CM_ATTR_COUNT];
42920+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42921 };
42922
42923 struct cm_counter_attribute {
42924@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42925 struct ib_mad_send_buf *msg = NULL;
42926 int ret;
42927
42928- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42929+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42930 counter[CM_REQ_COUNTER]);
42931
42932 /* Quick state check to discard duplicate REQs. */
42933@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42934 if (!cm_id_priv)
42935 return;
42936
42937- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42938+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42939 counter[CM_REP_COUNTER]);
42940 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42941 if (ret)
42942@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42943 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42944 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42945 spin_unlock_irq(&cm_id_priv->lock);
42946- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42947+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42948 counter[CM_RTU_COUNTER]);
42949 goto out;
42950 }
42951@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42952 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42953 dreq_msg->local_comm_id);
42954 if (!cm_id_priv) {
42955- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42956+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42957 counter[CM_DREQ_COUNTER]);
42958 cm_issue_drep(work->port, work->mad_recv_wc);
42959 return -EINVAL;
42960@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42961 case IB_CM_MRA_REP_RCVD:
42962 break;
42963 case IB_CM_TIMEWAIT:
42964- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42965+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42966 counter[CM_DREQ_COUNTER]);
42967 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42968 goto unlock;
42969@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42970 cm_free_msg(msg);
42971 goto deref;
42972 case IB_CM_DREQ_RCVD:
42973- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42974+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42975 counter[CM_DREQ_COUNTER]);
42976 goto unlock;
42977 default:
42978@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42979 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42980 cm_id_priv->msg, timeout)) {
42981 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42982- atomic_long_inc(&work->port->
42983+ atomic_long_inc_unchecked(&work->port->
42984 counter_group[CM_RECV_DUPLICATES].
42985 counter[CM_MRA_COUNTER]);
42986 goto out;
42987@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42988 break;
42989 case IB_CM_MRA_REQ_RCVD:
42990 case IB_CM_MRA_REP_RCVD:
42991- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42992+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42993 counter[CM_MRA_COUNTER]);
42994 /* fall through */
42995 default:
42996@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42997 case IB_CM_LAP_IDLE:
42998 break;
42999 case IB_CM_MRA_LAP_SENT:
43000- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43001+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43002 counter[CM_LAP_COUNTER]);
43003 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43004 goto unlock;
43005@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43006 cm_free_msg(msg);
43007 goto deref;
43008 case IB_CM_LAP_RCVD:
43009- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43010+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43011 counter[CM_LAP_COUNTER]);
43012 goto unlock;
43013 default:
43014@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43015 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43016 if (cur_cm_id_priv) {
43017 spin_unlock_irq(&cm.lock);
43018- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43019+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43020 counter[CM_SIDR_REQ_COUNTER]);
43021 goto out; /* Duplicate message. */
43022 }
43023@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43024 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43025 msg->retries = 1;
43026
43027- atomic_long_add(1 + msg->retries,
43028+ atomic_long_add_unchecked(1 + msg->retries,
43029 &port->counter_group[CM_XMIT].counter[attr_index]);
43030 if (msg->retries)
43031- atomic_long_add(msg->retries,
43032+ atomic_long_add_unchecked(msg->retries,
43033 &port->counter_group[CM_XMIT_RETRIES].
43034 counter[attr_index]);
43035
43036@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43037 }
43038
43039 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43040- atomic_long_inc(&port->counter_group[CM_RECV].
43041+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43042 counter[attr_id - CM_ATTR_ID_OFFSET]);
43043
43044 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43045@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43046 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43047
43048 return sprintf(buf, "%ld\n",
43049- atomic_long_read(&group->counter[cm_attr->index]));
43050+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43051 }
43052
43053 static const struct sysfs_ops cm_counter_ops = {
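
The cm_counter_group counters are statistics surfaced through sysfs, so they become atomic_long_unchecked_t, and every accessor in the file switches to the matching _unchecked spelling. The conversion is mechanical but has to be complete: the checked and unchecked families take distinct types, so a missed call site fails to compile instead of silently mixing semantics. The long variant mirrors the int one; as a sketch, not the kernel's implementation:

	typedef struct { long counter; } atomic_long_unchecked_t;

	static inline long
	atomic_long_read_unchecked(const atomic_long_unchecked_t *v)
	{
		return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	}

	static inline void
	atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
	{
		__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	}
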
43054diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43055index 9f5ad7c..588cd84 100644
43056--- a/drivers/infiniband/core/fmr_pool.c
43057+++ b/drivers/infiniband/core/fmr_pool.c
43058@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43059
43060 struct task_struct *thread;
43061
43062- atomic_t req_ser;
43063- atomic_t flush_ser;
43064+ atomic_unchecked_t req_ser;
43065+ atomic_unchecked_t flush_ser;
43066
43067 wait_queue_head_t force_wait;
43068 };
43069@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43070 struct ib_fmr_pool *pool = pool_ptr;
43071
43072 do {
43073- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43074+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43075 ib_fmr_batch_release(pool);
43076
43077- atomic_inc(&pool->flush_ser);
43078+ atomic_inc_unchecked(&pool->flush_ser);
43079 wake_up_interruptible(&pool->force_wait);
43080
43081 if (pool->flush_function)
43082@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43083 }
43084
43085 set_current_state(TASK_INTERRUPTIBLE);
43086- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43087+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43088 !kthread_should_stop())
43089 schedule();
43090 __set_current_state(TASK_RUNNING);
43091@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43092 pool->dirty_watermark = params->dirty_watermark;
43093 pool->dirty_len = 0;
43094 spin_lock_init(&pool->pool_lock);
43095- atomic_set(&pool->req_ser, 0);
43096- atomic_set(&pool->flush_ser, 0);
43097+ atomic_set_unchecked(&pool->req_ser, 0);
43098+ atomic_set_unchecked(&pool->flush_ser, 0);
43099 init_waitqueue_head(&pool->force_wait);
43100
43101 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43102@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43103 }
43104 spin_unlock_irq(&pool->pool_lock);
43105
43106- serial = atomic_inc_return(&pool->req_ser);
43107+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43108 wake_up_process(pool->thread);
43109
43110 if (wait_event_interruptible(pool->force_wait,
43111- atomic_read(&pool->flush_ser) - serial >= 0))
43112+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43113 return -EINTR;
43114
43115 return 0;
43116@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
43117 } else {
43118 list_add_tail(&fmr->list, &pool->dirty_list);
43119 if (++pool->dirty_len >= pool->dirty_watermark) {
43120- atomic_inc(&pool->req_ser);
43121+ atomic_inc_unchecked(&pool->req_ser);
43122 wake_up_process(pool->thread);
43123 }
43124 }
43125diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
43126index a9f0489..27a161b 100644
43127--- a/drivers/infiniband/core/uverbs_cmd.c
43128+++ b/drivers/infiniband/core/uverbs_cmd.c
43129@@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
43130 if (copy_from_user(&cmd, buf, sizeof cmd))
43131 return -EFAULT;
43132
43133+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
43134+ return -EFAULT;
43135+
43136 INIT_UDATA(&udata, buf + sizeof cmd,
43137 (unsigned long) cmd.response + sizeof resp,
43138 in_len - sizeof cmd, out_len - sizeof resp);
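
ib_uverbs_reg_mr() receives a user address range (cmd.start, cmd.length) that will later be pinned and mapped; the added check rejects ranges that do not even lie within the user address space before any setup work is done. access_ok_noprefault appears to be grsecurity's variant of access_ok() that performs only the arithmetic range test, without the page prefaulting grsecurity adds to some access_ok() paths. The underlying range test is the classic one (sketch; user_limit stands in for the per-arch TASK_SIZE bound):

	static inline int user_range_ok(unsigned long addr, unsigned long size,
					unsigned long user_limit)
	{
		unsigned long end;

		if (__builtin_add_overflow(addr, size, &end))
			return 0;          /* wrapped: definitely invalid */
		return end <= user_limit;  /* entirely below the user bound */
	}
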
43139diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
43140index 6791fd1..78bdcdf 100644
43141--- a/drivers/infiniband/hw/cxgb4/mem.c
43142+++ b/drivers/infiniband/hw/cxgb4/mem.c
43143@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43144 int err;
43145 struct fw_ri_tpte tpt;
43146 u32 stag_idx;
43147- static atomic_t key;
43148+ static atomic_unchecked_t key;
43149
43150 if (c4iw_fatal_error(rdev))
43151 return -EIO;
43152@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43153 if (rdev->stats.stag.cur > rdev->stats.stag.max)
43154 rdev->stats.stag.max = rdev->stats.stag.cur;
43155 mutex_unlock(&rdev->stats.lock);
43156- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
43157+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
43158 }
43159 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
43160 __func__, stag_state, type, pdid, stag_idx);
43161diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
43162index 79b3dbc..96e5fcc 100644
43163--- a/drivers/infiniband/hw/ipath/ipath_rc.c
43164+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
43165@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43166 struct ib_atomic_eth *ateth;
43167 struct ipath_ack_entry *e;
43168 u64 vaddr;
43169- atomic64_t *maddr;
43170+ atomic64_unchecked_t *maddr;
43171 u64 sdata;
43172 u32 rkey;
43173 u8 next;
43174@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43175 IB_ACCESS_REMOTE_ATOMIC)))
43176 goto nack_acc_unlck;
43177 /* Perform atomic OP and save result. */
43178- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43179+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43180 sdata = be64_to_cpu(ateth->swap_data);
43181 e = &qp->s_ack_queue[qp->r_head_ack_queue];
43182 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
43183- (u64) atomic64_add_return(sdata, maddr) - sdata :
43184+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43185 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43186 be64_to_cpu(ateth->compare_data),
43187 sdata);
43188diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
43189index 1f95bba..9530f87 100644
43190--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
43191+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
43192@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
43193 unsigned long flags;
43194 struct ib_wc wc;
43195 u64 sdata;
43196- atomic64_t *maddr;
43197+ atomic64_unchecked_t *maddr;
43198 enum ib_wc_status send_status;
43199
43200 /*
43201@@ -382,11 +382,11 @@ again:
43202 IB_ACCESS_REMOTE_ATOMIC)))
43203 goto acc_err;
43204 /* Perform atomic OP and save result. */
43205- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43206+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43207 sdata = wqe->wr.wr.atomic.compare_add;
43208 *(u64 *) sqp->s_sge.sge.vaddr =
43209 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
43210- (u64) atomic64_add_return(sdata, maddr) - sdata :
43211+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43212 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43213 sdata, wqe->wr.wr.atomic.swap);
43214 goto send_comp;
43215diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
43216index 5904026..f1c30e5 100644
43217--- a/drivers/infiniband/hw/mlx4/mad.c
43218+++ b/drivers/infiniband/hw/mlx4/mad.c
43219@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
43220
43221 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
43222 {
43223- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
43224+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
43225 cpu_to_be64(0xff00000000000000LL);
43226 }
43227
43228diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
43229index ed327e6..ca1739e0 100644
43230--- a/drivers/infiniband/hw/mlx4/mcg.c
43231+++ b/drivers/infiniband/hw/mlx4/mcg.c
43232@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
43233 {
43234 char name[20];
43235
43236- atomic_set(&ctx->tid, 0);
43237+ atomic_set_unchecked(&ctx->tid, 0);
43238 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
43239 ctx->mcg_wq = create_singlethread_workqueue(name);
43240 if (!ctx->mcg_wq)
43241diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43242index f829fd9..1a8d436 100644
43243--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
43244+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43245@@ -439,7 +439,7 @@ struct mlx4_ib_demux_ctx {
43246 struct list_head mcg_mgid0_list;
43247 struct workqueue_struct *mcg_wq;
43248 struct mlx4_ib_demux_pv_ctx **tun;
43249- atomic_t tid;
43250+ atomic_unchecked_t tid;
43251 int flushing; /* flushing the work queue */
43252 };
43253
43254diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
43255index 9d3e5c1..6f166df 100644
43256--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
43257+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
43258@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
43259 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
43260 }
43261
43262-int mthca_QUERY_FW(struct mthca_dev *dev)
43263+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
43264 {
43265 struct mthca_mailbox *mailbox;
43266 u32 *outbox;
43267@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43268 CMD_TIME_CLASS_B);
43269 }
43270
43271-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43272+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43273 int num_mtt)
43274 {
43275 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
43276@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
43277 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
43278 }
43279
43280-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43281+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43282 int eq_num)
43283 {
43284 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
43285@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
43286 CMD_TIME_CLASS_B);
43287 }
43288
43289-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43290+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43291 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
43292 void *in_mad, void *response_mad)
43293 {
43294diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
43295index ded76c1..0cf0a08 100644
43296--- a/drivers/infiniband/hw/mthca/mthca_main.c
43297+++ b/drivers/infiniband/hw/mthca/mthca_main.c
43298@@ -692,7 +692,7 @@ err_close:
43299 return err;
43300 }
43301
43302-static int mthca_setup_hca(struct mthca_dev *dev)
43303+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
43304 {
43305 int err;
43306
43307diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
43308index ed9a989..6aa5dc2 100644
43309--- a/drivers/infiniband/hw/mthca/mthca_mr.c
43310+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
43311@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
43312 * through the bitmaps)
43313 */
43314
43315-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43316+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43317 {
43318 int o;
43319 int m;
43320@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
43321 return key;
43322 }
43323
43324-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43325+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43326 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43327 {
43328 struct mthca_mailbox *mailbox;
43329@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43330 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43331 }
43332
43333-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43334+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43335 u64 *buffer_list, int buffer_size_shift,
43336 int list_len, u64 iova, u64 total_size,
43337 u32 access, struct mthca_mr *mr)
43338diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43339index 415f8e1..e34214e 100644
43340--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43341+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43342@@ -764,7 +764,7 @@ unlock:
43343 return 0;
43344 }
43345
43346-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43347+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43348 {
43349 struct mthca_dev *dev = to_mdev(ibcq->device);
43350 struct mthca_cq *cq = to_mcq(ibcq);
43351diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43352index 3b2a6dc..bce26ff 100644
43353--- a/drivers/infiniband/hw/nes/nes.c
43354+++ b/drivers/infiniband/hw/nes/nes.c
43355@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43356 LIST_HEAD(nes_adapter_list);
43357 static LIST_HEAD(nes_dev_list);
43358
43359-atomic_t qps_destroyed;
43360+atomic_unchecked_t qps_destroyed;
43361
43362 static unsigned int ee_flsh_adapter;
43363 static unsigned int sysfs_nonidx_addr;
43364@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43365 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43366 struct nes_adapter *nesadapter = nesdev->nesadapter;
43367
43368- atomic_inc(&qps_destroyed);
43369+ atomic_inc_unchecked(&qps_destroyed);
43370
43371 /* Free the control structures */
43372
43373diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43374index bd9d132..70d84f4 100644
43375--- a/drivers/infiniband/hw/nes/nes.h
43376+++ b/drivers/infiniband/hw/nes/nes.h
43377@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43378 extern unsigned int wqm_quanta;
43379 extern struct list_head nes_adapter_list;
43380
43381-extern atomic_t cm_connects;
43382-extern atomic_t cm_accepts;
43383-extern atomic_t cm_disconnects;
43384-extern atomic_t cm_closes;
43385-extern atomic_t cm_connecteds;
43386-extern atomic_t cm_connect_reqs;
43387-extern atomic_t cm_rejects;
43388-extern atomic_t mod_qp_timouts;
43389-extern atomic_t qps_created;
43390-extern atomic_t qps_destroyed;
43391-extern atomic_t sw_qps_destroyed;
43392+extern atomic_unchecked_t cm_connects;
43393+extern atomic_unchecked_t cm_accepts;
43394+extern atomic_unchecked_t cm_disconnects;
43395+extern atomic_unchecked_t cm_closes;
43396+extern atomic_unchecked_t cm_connecteds;
43397+extern atomic_unchecked_t cm_connect_reqs;
43398+extern atomic_unchecked_t cm_rejects;
43399+extern atomic_unchecked_t mod_qp_timouts;
43400+extern atomic_unchecked_t qps_created;
43401+extern atomic_unchecked_t qps_destroyed;
43402+extern atomic_unchecked_t sw_qps_destroyed;
43403 extern u32 mh_detected;
43404 extern u32 mh_pauses_sent;
43405 extern u32 cm_packets_sent;
43406@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43407 extern u32 cm_packets_received;
43408 extern u32 cm_packets_dropped;
43409 extern u32 cm_packets_retrans;
43410-extern atomic_t cm_listens_created;
43411-extern atomic_t cm_listens_destroyed;
43412+extern atomic_unchecked_t cm_listens_created;
43413+extern atomic_unchecked_t cm_listens_destroyed;
43414 extern u32 cm_backlog_drops;
43415-extern atomic_t cm_loopbacks;
43416-extern atomic_t cm_nodes_created;
43417-extern atomic_t cm_nodes_destroyed;
43418-extern atomic_t cm_accel_dropped_pkts;
43419-extern atomic_t cm_resets_recvd;
43420-extern atomic_t pau_qps_created;
43421-extern atomic_t pau_qps_destroyed;
43422+extern atomic_unchecked_t cm_loopbacks;
43423+extern atomic_unchecked_t cm_nodes_created;
43424+extern atomic_unchecked_t cm_nodes_destroyed;
43425+extern atomic_unchecked_t cm_accel_dropped_pkts;
43426+extern atomic_unchecked_t cm_resets_recvd;
43427+extern atomic_unchecked_t pau_qps_created;
43428+extern atomic_unchecked_t pau_qps_destroyed;
43429
43430 extern u32 int_mod_timer_init;
43431 extern u32 int_mod_cq_depth_256;
43432diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43433index 6f09a72..cf4399d 100644
43434--- a/drivers/infiniband/hw/nes/nes_cm.c
43435+++ b/drivers/infiniband/hw/nes/nes_cm.c
43436@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43437 u32 cm_packets_retrans;
43438 u32 cm_packets_created;
43439 u32 cm_packets_received;
43440-atomic_t cm_listens_created;
43441-atomic_t cm_listens_destroyed;
43442+atomic_unchecked_t cm_listens_created;
43443+atomic_unchecked_t cm_listens_destroyed;
43444 u32 cm_backlog_drops;
43445-atomic_t cm_loopbacks;
43446-atomic_t cm_nodes_created;
43447-atomic_t cm_nodes_destroyed;
43448-atomic_t cm_accel_dropped_pkts;
43449-atomic_t cm_resets_recvd;
43450+atomic_unchecked_t cm_loopbacks;
43451+atomic_unchecked_t cm_nodes_created;
43452+atomic_unchecked_t cm_nodes_destroyed;
43453+atomic_unchecked_t cm_accel_dropped_pkts;
43454+atomic_unchecked_t cm_resets_recvd;
43455
43456 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43457 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43458@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43459 /* instance of function pointers for client API */
43460 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43461 static struct nes_cm_ops nes_cm_api = {
43462- mini_cm_accelerated,
43463- mini_cm_listen,
43464- mini_cm_del_listen,
43465- mini_cm_connect,
43466- mini_cm_close,
43467- mini_cm_accept,
43468- mini_cm_reject,
43469- mini_cm_recv_pkt,
43470- mini_cm_dealloc_core,
43471- mini_cm_get,
43472- mini_cm_set
43473+ .accelerated = mini_cm_accelerated,
43474+ .listen = mini_cm_listen,
43475+ .stop_listener = mini_cm_del_listen,
43476+ .connect = mini_cm_connect,
43477+ .close = mini_cm_close,
43478+ .accept = mini_cm_accept,
43479+ .reject = mini_cm_reject,
43480+ .recv_pkt = mini_cm_recv_pkt,
43481+ .destroy_cm_core = mini_cm_dealloc_core,
43482+ .get = mini_cm_get,
43483+ .set = mini_cm_set
43484 };
43485
43486 static struct nes_cm_core *g_cm_core;
43487
43488-atomic_t cm_connects;
43489-atomic_t cm_accepts;
43490-atomic_t cm_disconnects;
43491-atomic_t cm_closes;
43492-atomic_t cm_connecteds;
43493-atomic_t cm_connect_reqs;
43494-atomic_t cm_rejects;
43495+atomic_unchecked_t cm_connects;
43496+atomic_unchecked_t cm_accepts;
43497+atomic_unchecked_t cm_disconnects;
43498+atomic_unchecked_t cm_closes;
43499+atomic_unchecked_t cm_connecteds;
43500+atomic_unchecked_t cm_connect_reqs;
43501+atomic_unchecked_t cm_rejects;
43502
43503 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43504 {
43505@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43506 kfree(listener);
43507 listener = NULL;
43508 ret = 0;
43509- atomic_inc(&cm_listens_destroyed);
43510+ atomic_inc_unchecked(&cm_listens_destroyed);
43511 } else {
43512 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43513 }
43514@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43515 cm_node->rem_mac);
43516
43517 add_hte_node(cm_core, cm_node);
43518- atomic_inc(&cm_nodes_created);
43519+ atomic_inc_unchecked(&cm_nodes_created);
43520
43521 return cm_node;
43522 }
43523@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43524 }
43525
43526 atomic_dec(&cm_core->node_cnt);
43527- atomic_inc(&cm_nodes_destroyed);
43528+ atomic_inc_unchecked(&cm_nodes_destroyed);
43529 nesqp = cm_node->nesqp;
43530 if (nesqp) {
43531 nesqp->cm_node = NULL;
43532@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43533
43534 static void drop_packet(struct sk_buff *skb)
43535 {
43536- atomic_inc(&cm_accel_dropped_pkts);
43537+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43538 dev_kfree_skb_any(skb);
43539 }
43540
43541@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43542 {
43543
43544 int reset = 0; /* whether to send reset in case of err.. */
43545- atomic_inc(&cm_resets_recvd);
43546+ atomic_inc_unchecked(&cm_resets_recvd);
43547 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43548 " refcnt=%d\n", cm_node, cm_node->state,
43549 atomic_read(&cm_node->ref_count));
43550@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43551 rem_ref_cm_node(cm_node->cm_core, cm_node);
43552 return NULL;
43553 }
43554- atomic_inc(&cm_loopbacks);
43555+ atomic_inc_unchecked(&cm_loopbacks);
43556 loopbackremotenode->loopbackpartner = cm_node;
43557 loopbackremotenode->tcp_cntxt.rcv_wscale =
43558 NES_CM_DEFAULT_RCV_WND_SCALE;
43559@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43560 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43561 else {
43562 rem_ref_cm_node(cm_core, cm_node);
43563- atomic_inc(&cm_accel_dropped_pkts);
43564+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43565 dev_kfree_skb_any(skb);
43566 }
43567 break;
43568@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43569
43570 if ((cm_id) && (cm_id->event_handler)) {
43571 if (issue_disconn) {
43572- atomic_inc(&cm_disconnects);
43573+ atomic_inc_unchecked(&cm_disconnects);
43574 cm_event.event = IW_CM_EVENT_DISCONNECT;
43575 cm_event.status = disconn_status;
43576 cm_event.local_addr = cm_id->local_addr;
43577@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43578 }
43579
43580 if (issue_close) {
43581- atomic_inc(&cm_closes);
43582+ atomic_inc_unchecked(&cm_closes);
43583 nes_disconnect(nesqp, 1);
43584
43585 cm_id->provider_data = nesqp;
43586@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43587
43588 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43589 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43590- atomic_inc(&cm_accepts);
43591+ atomic_inc_unchecked(&cm_accepts);
43592
43593 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43594 netdev_refcnt_read(nesvnic->netdev));
43595@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43596 struct nes_cm_core *cm_core;
43597 u8 *start_buff;
43598
43599- atomic_inc(&cm_rejects);
43600+ atomic_inc_unchecked(&cm_rejects);
43601 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43602 loopback = cm_node->loopbackpartner;
43603 cm_core = cm_node->cm_core;
43604@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43605 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43606 ntohs(laddr->sin_port));
43607
43608- atomic_inc(&cm_connects);
43609+ atomic_inc_unchecked(&cm_connects);
43610 nesqp->active_conn = 1;
43611
43612 /* cache the cm_id in the qp */
43613@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43614 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43615 return err;
43616 }
43617- atomic_inc(&cm_listens_created);
43618+ atomic_inc_unchecked(&cm_listens_created);
43619 }
43620
43621 cm_id->add_ref(cm_id);
43622@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43623
43624 if (nesqp->destroyed)
43625 return;
43626- atomic_inc(&cm_connecteds);
43627+ atomic_inc_unchecked(&cm_connecteds);
43628 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43629 " local port 0x%04X. jiffies = %lu.\n",
43630 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43631@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43632
43633 cm_id->add_ref(cm_id);
43634 ret = cm_id->event_handler(cm_id, &cm_event);
43635- atomic_inc(&cm_closes);
43636+ atomic_inc_unchecked(&cm_closes);
43637 cm_event.event = IW_CM_EVENT_CLOSE;
43638 cm_event.status = 0;
43639 cm_event.provider_data = cm_id->provider_data;
43640@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43641 return;
43642 cm_id = cm_node->cm_id;
43643
43644- atomic_inc(&cm_connect_reqs);
43645+ atomic_inc_unchecked(&cm_connect_reqs);
43646 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43647 cm_node, cm_id, jiffies);
43648
43649@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43650 return;
43651 cm_id = cm_node->cm_id;
43652
43653- atomic_inc(&cm_connect_reqs);
43654+ atomic_inc_unchecked(&cm_connect_reqs);
43655 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43656 cm_node, cm_id, jiffies);
43657
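
The nes_cm_api hunk above rewrites a positional struct initializer as C99 designated initializers. This is more than style: grsecurity's randomize_layout plugin may shuffle the members of sensitive structs at build time, and a positional initializer would then assign callbacks to the wrong slots (the plugin treats positional initialization of such structs as an error). The designated form names each member, so it survives any reordering:

	struct ops {
		int (*open)(void);
		int (*close)(void);
	};

	static int my_open(void)  { return 0; }
	static int my_close(void) { return 0; }

	/* Positional: silently wrong if 'struct ops' is ever reordered. */
	static struct ops legacy_ops = { my_open, my_close };

	/* Designated: immune to member reordering. */
	static struct ops safe_ops = {
		.open  = my_open,
		.close = my_close,
	};
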
43658diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43659index 4166452..fc952c3 100644
43660--- a/drivers/infiniband/hw/nes/nes_mgt.c
43661+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43662@@ -40,8 +40,8 @@
43663 #include "nes.h"
43664 #include "nes_mgt.h"
43665
43666-atomic_t pau_qps_created;
43667-atomic_t pau_qps_destroyed;
43668+atomic_unchecked_t pau_qps_created;
43669+atomic_unchecked_t pau_qps_destroyed;
43670
43671 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43672 {
43673@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43674 {
43675 struct sk_buff *skb;
43676 unsigned long flags;
43677- atomic_inc(&pau_qps_destroyed);
43678+ atomic_inc_unchecked(&pau_qps_destroyed);
43679
43680 /* Free packets that have not yet been forwarded */
43681 /* Lock is acquired by skb_dequeue when removing the skb */
43682@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43683 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43684 skb_queue_head_init(&nesqp->pau_list);
43685 spin_lock_init(&nesqp->pau_lock);
43686- atomic_inc(&pau_qps_created);
43687+ atomic_inc_unchecked(&pau_qps_created);
43688 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43689 }
43690
43691diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43692index 70acda9..a96de9d 100644
43693--- a/drivers/infiniband/hw/nes/nes_nic.c
43694+++ b/drivers/infiniband/hw/nes/nes_nic.c
43695@@ -1274,39 +1274,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43696 target_stat_values[++index] = mh_detected;
43697 target_stat_values[++index] = mh_pauses_sent;
43698 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43699- target_stat_values[++index] = atomic_read(&cm_connects);
43700- target_stat_values[++index] = atomic_read(&cm_accepts);
43701- target_stat_values[++index] = atomic_read(&cm_disconnects);
43702- target_stat_values[++index] = atomic_read(&cm_connecteds);
43703- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43704- target_stat_values[++index] = atomic_read(&cm_rejects);
43705- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43706- target_stat_values[++index] = atomic_read(&qps_created);
43707- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43708- target_stat_values[++index] = atomic_read(&qps_destroyed);
43709- target_stat_values[++index] = atomic_read(&cm_closes);
43710+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43711+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43712+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43713+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43714+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43715+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43716+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43717+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43718+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43719+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43720+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43721 target_stat_values[++index] = cm_packets_sent;
43722 target_stat_values[++index] = cm_packets_bounced;
43723 target_stat_values[++index] = cm_packets_created;
43724 target_stat_values[++index] = cm_packets_received;
43725 target_stat_values[++index] = cm_packets_dropped;
43726 target_stat_values[++index] = cm_packets_retrans;
43727- target_stat_values[++index] = atomic_read(&cm_listens_created);
43728- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43729+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43730+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43731 target_stat_values[++index] = cm_backlog_drops;
43732- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43733- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43734- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43735- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43736- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43737+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43738+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43739+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43740+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43741+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43742 target_stat_values[++index] = nesadapter->free_4kpbl;
43743 target_stat_values[++index] = nesadapter->free_256pbl;
43744 target_stat_values[++index] = int_mod_timer_init;
43745 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43746 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43747 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43748- target_stat_values[++index] = atomic_read(&pau_qps_created);
43749- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43750+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43751+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43752 }
43753
43754 /**
43755diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43756index c0d0296..3185f57 100644
43757--- a/drivers/infiniband/hw/nes/nes_verbs.c
43758+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43759@@ -46,9 +46,9 @@
43760
43761 #include <rdma/ib_umem.h>
43762
43763-atomic_t mod_qp_timouts;
43764-atomic_t qps_created;
43765-atomic_t sw_qps_destroyed;
43766+atomic_unchecked_t mod_qp_timouts;
43767+atomic_unchecked_t qps_created;
43768+atomic_unchecked_t sw_qps_destroyed;
43769
43770 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43771
43772@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43773 if (init_attr->create_flags)
43774 return ERR_PTR(-EINVAL);
43775
43776- atomic_inc(&qps_created);
43777+ atomic_inc_unchecked(&qps_created);
43778 switch (init_attr->qp_type) {
43779 case IB_QPT_RC:
43780 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43781@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43782 struct iw_cm_event cm_event;
43783 int ret = 0;
43784
43785- atomic_inc(&sw_qps_destroyed);
43786+ atomic_inc_unchecked(&sw_qps_destroyed);
43787 nesqp->destroyed = 1;
43788
43789 /* Blow away the connection if it exists. */
43790diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43791index ffd48bf..83cdb56 100644
43792--- a/drivers/infiniband/hw/qib/qib.h
43793+++ b/drivers/infiniband/hw/qib/qib.h
43794@@ -52,6 +52,7 @@
43795 #include <linux/kref.h>
43796 #include <linux/sched.h>
43797 #include <linux/kthread.h>
43798+#include <linux/slab.h>
43799
43800 #include "qib_common.h"
43801 #include "qib_verbs.h"
43802diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43803index cdc7df4..a2fdfdb 100644
43804--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43805+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43806@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43807 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43808 }
43809
43810-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43811+static struct rtnl_link_ops ipoib_link_ops = {
43812 .kind = "ipoib",
43813 .maxtype = IFLA_IPOIB_MAX,
43814 .policy = ipoib_policy,
43815diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43816index e853a21..56fc5a8 100644
43817--- a/drivers/input/gameport/gameport.c
43818+++ b/drivers/input/gameport/gameport.c
43819@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43820 */
43821 static void gameport_init_port(struct gameport *gameport)
43822 {
43823- static atomic_t gameport_no = ATOMIC_INIT(-1);
43824+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43825
43826 __module_get(THIS_MODULE);
43827
43828 mutex_init(&gameport->drv_mutex);
43829 device_initialize(&gameport->dev);
43830 dev_set_name(&gameport->dev, "gameport%lu",
43831- (unsigned long)atomic_inc_return(&gameport_no));
43832+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43833 gameport->dev.bus = &gameport_bus;
43834 gameport->dev.release = gameport_release_port;
43835 if (gameport->parent)
43836diff --git a/drivers/input/input.c b/drivers/input/input.c
43837index cc357f1..ee42fbc 100644
43838--- a/drivers/input/input.c
43839+++ b/drivers/input/input.c
43840@@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(input_class);
43841 */
43842 struct input_dev *input_allocate_device(void)
43843 {
43844- static atomic_t input_no = ATOMIC_INIT(-1);
43845+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43846 struct input_dev *dev;
43847
43848 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43849@@ -1796,7 +1796,7 @@ struct input_dev *input_allocate_device(void)
43850 INIT_LIST_HEAD(&dev->node);
43851
43852 dev_set_name(&dev->dev, "input%lu",
43853- (unsigned long)atomic_inc_return(&input_no));
43854+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43855
43856 __module_get(THIS_MODULE);
43857 }
43858diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43859index 4a95b22..874c182 100644
43860--- a/drivers/input/joystick/sidewinder.c
43861+++ b/drivers/input/joystick/sidewinder.c
43862@@ -30,6 +30,7 @@
43863 #include <linux/kernel.h>
43864 #include <linux/module.h>
43865 #include <linux/slab.h>
43866+#include <linux/sched.h>
43867 #include <linux/input.h>
43868 #include <linux/gameport.h>
43869 #include <linux/jiffies.h>
43870diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43871index 3aa2f3f..53c00ea 100644
43872--- a/drivers/input/joystick/xpad.c
43873+++ b/drivers/input/joystick/xpad.c
43874@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43875
43876 static int xpad_led_probe(struct usb_xpad *xpad)
43877 {
43878- static atomic_t led_seq = ATOMIC_INIT(-1);
43879+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43880 unsigned long led_no;
43881 struct xpad_led *led;
43882 struct led_classdev *led_cdev;
43883@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43884 if (!led)
43885 return -ENOMEM;
43886
43887- led_no = atomic_inc_return(&led_seq);
43888+ led_no = atomic_inc_return_unchecked(&led_seq);
43889
43890 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43891 led->xpad = xpad;
43892diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43893index ac1fa5f..5f7502c 100644
43894--- a/drivers/input/misc/ims-pcu.c
43895+++ b/drivers/input/misc/ims-pcu.c
43896@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43897
43898 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43899 {
43900- static atomic_t device_no = ATOMIC_INIT(-1);
43901+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43902
43903 const struct ims_pcu_device_info *info;
43904 int error;
43905@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43906 }
43907
43908 /* Device appears to be operable, complete initialization */
43909- pcu->device_no = atomic_inc_return(&device_no);
43910+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43911
43912 /*
43913 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43914diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43915index d02e1bd..d719719 100644
43916--- a/drivers/input/mouse/psmouse.h
43917+++ b/drivers/input/mouse/psmouse.h
43918@@ -124,7 +124,7 @@ struct psmouse_attribute {
43919 ssize_t (*set)(struct psmouse *psmouse, void *data,
43920 const char *buf, size_t count);
43921 bool protect;
43922-};
43923+} __do_const;
43924 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43925
43926 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43927diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43928index b604564..3f14ae4 100644
43929--- a/drivers/input/mousedev.c
43930+++ b/drivers/input/mousedev.c
43931@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43932
43933 spin_unlock_irq(&client->packet_lock);
43934
43935- if (copy_to_user(buffer, data, count))
43936+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43937 return -EFAULT;
43938
43939 return count;
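
The mousedev change is a straight bounds check: data is a fixed-size packet buffer and count comes from the read() caller, so the copy now refuses any count larger than the buffer rather than trusting earlier clamping. As a generic pattern (sketch; the buffer size and fill step are placeholders):

	#include <linux/string.h>
	#include <linux/uaccess.h>

	static ssize_t read_packet(char __user *ubuf, size_t count)
	{
		unsigned char data[8];

		memset(data, 0, sizeof(data)); /* stands in for packet fill */
		if (count > sizeof(data) || copy_to_user(ubuf, data, count))
			return -EFAULT;
		return count;
	}
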
43940diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43941index a05a517..323a2fd 100644
43942--- a/drivers/input/serio/serio.c
43943+++ b/drivers/input/serio/serio.c
43944@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43945 */
43946 static void serio_init_port(struct serio *serio)
43947 {
43948- static atomic_t serio_no = ATOMIC_INIT(-1);
43949+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43950
43951 __module_get(THIS_MODULE);
43952
43953@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43954 mutex_init(&serio->drv_mutex);
43955 device_initialize(&serio->dev);
43956 dev_set_name(&serio->dev, "serio%lu",
43957- (unsigned long)atomic_inc_return(&serio_no));
43958+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43959 serio->dev.bus = &serio_bus;
43960 serio->dev.release = serio_release_port;
43961 serio->dev.groups = serio_device_attr_groups;
43962diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43963index 71ef5d6..93380a9 100644
43964--- a/drivers/input/serio/serio_raw.c
43965+++ b/drivers/input/serio/serio_raw.c
43966@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43967
43968 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43969 {
43970- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43971+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43972 struct serio_raw *serio_raw;
43973 int err;
43974
43975@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43976 }
43977
43978 snprintf(serio_raw->name, sizeof(serio_raw->name),
43979- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43980+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43981 kref_init(&serio_raw->kref);
43982 INIT_LIST_HEAD(&serio_raw->client_list);
43983 init_waitqueue_head(&serio_raw->wait);
43984diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
43985index 92e2243..8fd9092 100644
43986--- a/drivers/input/touchscreen/htcpen.c
43987+++ b/drivers/input/touchscreen/htcpen.c
43988@@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
43989 }
43990 };
43991
43992-static struct dmi_system_id htcshift_dmi_table[] __initdata = {
43993+static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
43994 {
43995 .ident = "Shift",
43996 .matches = {
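
The htcpen DMI table is consulted once at boot, so it belongs in init memory; and because the table becomes const, the matching section annotation is __initconst (init rodata) rather than __initdata (init data) -- const data in __initdata can trigger section attribute conflicts. The resulting pattern:

	#include <linux/dmi.h>
	#include <linux/init.h>

	/* Hypothetical quirk table: read-only, discarded after boot. */
	static const struct dmi_system_id quirk_table[] __initconst = {
		{
			.ident = "Some Vendor Board",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
			},
		},
		{ } /* terminator */
	};
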
43997diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43998index 48882c1..93e0987 100644
43999--- a/drivers/iommu/amd_iommu.c
44000+++ b/drivers/iommu/amd_iommu.c
44001@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
44002
44003 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
44004 {
44005+ phys_addr_t physaddr;
44006 WARN_ON(address & 0x7ULL);
44007
44008 memset(cmd, 0, sizeof(*cmd));
44009- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
44010- cmd->data[1] = upper_32_bits(__pa(address));
44011+
44012+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
44013+ if (object_starts_on_stack((void *)address)) {
44014+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
44015+ physaddr = __pa((u64)adjbuf);
44016+ } else
44017+#endif
44018+ physaddr = __pa(address);
44019+
44020+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
44021+ cmd->data[1] = upper_32_bits(physaddr);
44022 cmd->data[2] = 1;
44023 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
44024 }
44025diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44026index a3adde6..988ee96 100644
44027--- a/drivers/iommu/arm-smmu.c
44028+++ b/drivers/iommu/arm-smmu.c
44029@@ -338,7 +338,7 @@ enum arm_smmu_domain_stage {
44030
44031 struct arm_smmu_domain {
44032 struct arm_smmu_device *smmu;
44033- struct io_pgtable_ops *pgtbl_ops;
44034+ struct io_pgtable *pgtbl;
44035 spinlock_t pgtbl_lock;
44036 struct arm_smmu_cfg cfg;
44037 enum arm_smmu_domain_stage stage;
44038@@ -833,7 +833,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44039 {
44040 int irq, start, ret = 0;
44041 unsigned long ias, oas;
44042- struct io_pgtable_ops *pgtbl_ops;
44043+ struct io_pgtable *pgtbl;
44044 struct io_pgtable_cfg pgtbl_cfg;
44045 enum io_pgtable_fmt fmt;
44046 struct arm_smmu_domain *smmu_domain = domain->priv;
44047@@ -918,14 +918,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44048 };
44049
44050 smmu_domain->smmu = smmu;
44051- pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
44052- if (!pgtbl_ops) {
44053+ pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
44054+ if (!pgtbl) {
44055 ret = -ENOMEM;
44056 goto out_clear_smmu;
44057 }
44058
44059 /* Update our support page sizes to reflect the page table format */
44060- arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44061+ pax_open_kernel();
44062+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44063+ pax_close_kernel();
44064
44065 /* Initialise the context bank with our page table cfg */
44066 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
44067@@ -946,7 +948,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44068 mutex_unlock(&smmu_domain->init_mutex);
44069
44070 /* Publish page table ops for map/unmap */
44071- smmu_domain->pgtbl_ops = pgtbl_ops;
44072+ smmu_domain->pgtbl = pgtbl;
44073 return 0;
44074
44075 out_clear_smmu:
44076@@ -979,8 +981,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
44077 free_irq(irq, domain);
44078 }
44079
44080- if (smmu_domain->pgtbl_ops)
44081- free_io_pgtable_ops(smmu_domain->pgtbl_ops);
44082+ free_io_pgtable(smmu_domain->pgtbl);
44083
44084 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
44085 }
44086@@ -1204,13 +1205,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
44087 int ret;
44088 unsigned long flags;
44089 struct arm_smmu_domain *smmu_domain = domain->priv;
44090- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44091+ struct io_pgtable *iop = smmu_domain->pgtbl;
44092
44093- if (!ops)
44094+ if (!iop)
44095 return -ENODEV;
44096
44097 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44098- ret = ops->map(ops, iova, paddr, size, prot);
44099+ ret = iop->ops->map(iop, iova, paddr, size, prot);
44100 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44101 return ret;
44102 }
44103@@ -1221,13 +1222,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
44104 size_t ret;
44105 unsigned long flags;
44106 struct arm_smmu_domain *smmu_domain = domain->priv;
44107- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44108+ struct io_pgtable *iop = smmu_domain->pgtbl;
44109
44110- if (!ops)
44111+ if (!iop)
44112 return 0;
44113
44114 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44115- ret = ops->unmap(ops, iova, size);
44116+ ret = iop->ops->unmap(iop, iova, size);
44117 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44118 return ret;
44119 }
44120@@ -1238,7 +1239,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44121 struct arm_smmu_domain *smmu_domain = domain->priv;
44122 struct arm_smmu_device *smmu = smmu_domain->smmu;
44123 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
44124- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44125+ struct io_pgtable *iop = smmu_domain->pgtbl;
44126 struct device *dev = smmu->dev;
44127 void __iomem *cb_base;
44128 u32 tmp;
44129@@ -1261,7 +1262,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44130 dev_err(dev,
44131 "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
44132 &iova);
44133- return ops->iova_to_phys(ops, iova);
44134+ return iop->ops->iova_to_phys(iop, iova);
44135 }
44136
44137 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
44138@@ -1282,9 +1283,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44139 phys_addr_t ret;
44140 unsigned long flags;
44141 struct arm_smmu_domain *smmu_domain = domain->priv;
44142- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44143+ struct io_pgtable *iop = smmu_domain->pgtbl;
44144
44145- if (!ops)
44146+ if (!iop)
44147 return 0;
44148
44149 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44150@@ -1292,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44151 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
44152 ret = arm_smmu_iova_to_phys_hard(domain, iova);
44153 } else {
44154- ret = ops->iova_to_phys(ops, iova);
44155+ ret = iop->ops->iova_to_phys(iop, iova);
44156 }
44157
44158 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44159@@ -1651,7 +1652,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
44160 size |= SZ_64K | SZ_512M;
44161 }
44162
44163- arm_smmu_ops.pgsize_bitmap &= size;
44164+ pax_open_kernel();
44165+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap &= size;
44166+ pax_close_kernel();
44167 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
44168
44169 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
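
In the arm-smmu hunks above, arm_smmu_ops lives in memory that PaX's constification makes read-only, so the two sanctioned updates to pgsize_bitmap are written through a cast inside a pax_open_kernel()/pax_close_kernel() pair, which briefly re-enables kernel write access (CR0.WP on x86). A runnable userland analogue of that bracketing pattern using POSIX mprotect, assuming only standard APIs:

/* Userland analogue of pax_open_kernel()/pax_close_kernel(): data is
 * normally mapped read-only and each write is fenced by a short window
 * of write permission. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	unsigned long *ops = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	*ops = 0xfffUL;                         /* initialise "pgsize_bitmap" */
	mprotect(ops, pgsz, PROT_READ);         /* constified: now read-only */

	mprotect(ops, pgsz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
	*ops &= 0xff0UL;                        /* the one sanctioned update */
	mprotect(ops, pgsz, PROT_READ);         /* pax_close_kernel() */

	printf("pgsize_bitmap: 0x%lx\n", *ops);
	munmap(ops, pgsz);
	return 0;
}
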
44170diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
44171index b610a8d..08eb879 100644
44172--- a/drivers/iommu/io-pgtable-arm.c
44173+++ b/drivers/iommu/io-pgtable-arm.c
44174@@ -36,12 +36,6 @@
44175 #define io_pgtable_to_data(x) \
44176 container_of((x), struct arm_lpae_io_pgtable, iop)
44177
44178-#define io_pgtable_ops_to_pgtable(x) \
44179- container_of((x), struct io_pgtable, ops)
44180-
44181-#define io_pgtable_ops_to_data(x) \
44182- io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44183-
44184 /*
44185 * For consistency with the architecture, we always consider
44186 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
44187@@ -302,10 +296,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
44188 return pte;
44189 }
44190
44191-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
44192+static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova,
44193 phys_addr_t paddr, size_t size, int iommu_prot)
44194 {
44195- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44196+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44197 arm_lpae_iopte *ptep = data->pgd;
44198 int lvl = ARM_LPAE_START_LVL(data);
44199 arm_lpae_iopte prot;
44200@@ -445,12 +439,11 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
44201 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
44202 }
44203
44204-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44205+static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova,
44206 size_t size)
44207 {
44208 size_t unmapped;
44209- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44210- struct io_pgtable *iop = &data->iop;
44211+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44212 arm_lpae_iopte *ptep = data->pgd;
44213 int lvl = ARM_LPAE_START_LVL(data);
44214
44215@@ -461,10 +454,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44216 return unmapped;
44217 }
44218
44219-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
44220+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop,
44221 unsigned long iova)
44222 {
44223- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44224+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44225 arm_lpae_iopte pte, *ptep = data->pgd;
44226 int lvl = ARM_LPAE_START_LVL(data);
44227
44228@@ -531,6 +524,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
44229 }
44230 }
44231
44232+static struct io_pgtable_ops arm_lpae_io_pgtable_ops = {
44233+ .map = arm_lpae_map,
44234+ .unmap = arm_lpae_unmap,
44235+ .iova_to_phys = arm_lpae_iova_to_phys,
44236+};
44237+
44238 static struct arm_lpae_io_pgtable *
44239 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44240 {
44241@@ -562,11 +561,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44242 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
44243 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
44244
44245- data->iop.ops = (struct io_pgtable_ops) {
44246- .map = arm_lpae_map,
44247- .unmap = arm_lpae_unmap,
44248- .iova_to_phys = arm_lpae_iova_to_phys,
44249- };
44250+ data->iop.ops = &arm_lpae_io_pgtable_ops;
44251
44252 return data;
44253 }
44254@@ -825,9 +820,9 @@ static struct iommu_gather_ops dummy_tlb_ops __initdata = {
44255 .flush_pgtable = dummy_flush_pgtable,
44256 };
44257
44258-static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44259+static void __init arm_lpae_dump_ops(struct io_pgtable *iop)
44260 {
44261- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44262+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44263 struct io_pgtable_cfg *cfg = &data->iop.cfg;
44264
44265 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
44266@@ -837,9 +832,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44267 data->bits_per_level, data->pgd);
44268 }
44269
44270-#define __FAIL(ops, i) ({ \
44271+#define __FAIL(iop, i) ({ \
44272 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
44273- arm_lpae_dump_ops(ops); \
44274+ arm_lpae_dump_ops(iop); \
44275 selftest_running = false; \
44276 -EFAULT; \
44277 })
44278@@ -854,30 +849,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44279 int i, j;
44280 unsigned long iova;
44281 size_t size;
44282- struct io_pgtable_ops *ops;
44283+ struct io_pgtable *iop;
44284+ const struct io_pgtable_ops *ops;
44285
44286 selftest_running = true;
44287
44288 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
44289 cfg_cookie = cfg;
44290- ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
44291- if (!ops) {
44292+ iop = alloc_io_pgtable(fmts[i], cfg, cfg);
44293+ if (!iop) {
44294 pr_err("selftest: failed to allocate io pgtable ops\n");
44295 return -ENOMEM;
44296 }
44297+ ops = iop->ops;
44298
44299 /*
44300 * Initial sanity checks.
44301 * Empty page tables shouldn't provide any translations.
44302 */
44303- if (ops->iova_to_phys(ops, 42))
44304- return __FAIL(ops, i);
44305+ if (ops->iova_to_phys(iop, 42))
44306+ return __FAIL(iop, i);
44307
44308- if (ops->iova_to_phys(ops, SZ_1G + 42))
44309- return __FAIL(ops, i);
44310+ if (ops->iova_to_phys(iop, SZ_1G + 42))
44311+ return __FAIL(iop, i);
44312
44313- if (ops->iova_to_phys(ops, SZ_2G + 42))
44314- return __FAIL(ops, i);
44315+ if (ops->iova_to_phys(iop, SZ_2G + 42))
44316+ return __FAIL(iop, i);
44317
44318 /*
44319 * Distinct mappings of different granule sizes.
44320@@ -887,19 +884,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44321 while (j != BITS_PER_LONG) {
44322 size = 1UL << j;
44323
44324- if (ops->map(ops, iova, iova, size, IOMMU_READ |
44325+ if (ops->map(iop, iova, iova, size, IOMMU_READ |
44326 IOMMU_WRITE |
44327 IOMMU_NOEXEC |
44328 IOMMU_CACHE))
44329- return __FAIL(ops, i);
44330+ return __FAIL(iop, i);
44331
44332 /* Overlapping mappings */
44333- if (!ops->map(ops, iova, iova + size, size,
44334+ if (!ops->map(iop, iova, iova + size, size,
44335 IOMMU_READ | IOMMU_NOEXEC))
44336- return __FAIL(ops, i);
44337+ return __FAIL(iop, i);
44338
44339- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44340- return __FAIL(ops, i);
44341+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44342+ return __FAIL(iop, i);
44343
44344 iova += SZ_1G;
44345 j++;
44346@@ -908,15 +905,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44347
44348 /* Partial unmap */
44349 size = 1UL << __ffs(cfg->pgsize_bitmap);
44350- if (ops->unmap(ops, SZ_1G + size, size) != size)
44351- return __FAIL(ops, i);
44352+ if (ops->unmap(iop, SZ_1G + size, size) != size)
44353+ return __FAIL(iop, i);
44354
44355 /* Remap of partial unmap */
44356- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
44357- return __FAIL(ops, i);
44358+ if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ))
44359+ return __FAIL(iop, i);
44360
44361- if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
44362- return __FAIL(ops, i);
44363+ if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42))
44364+ return __FAIL(iop, i);
44365
44366 /* Full unmap */
44367 iova = 0;
44368@@ -924,25 +921,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44369 while (j != BITS_PER_LONG) {
44370 size = 1UL << j;
44371
44372- if (ops->unmap(ops, iova, size) != size)
44373- return __FAIL(ops, i);
44374+ if (ops->unmap(iop, iova, size) != size)
44375+ return __FAIL(iop, i);
44376
44377- if (ops->iova_to_phys(ops, iova + 42))
44378- return __FAIL(ops, i);
44379+ if (ops->iova_to_phys(iop, iova + 42))
44380+ return __FAIL(iop, i);
44381
44382 /* Remap full block */
44383- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
44384- return __FAIL(ops, i);
44385+ if (ops->map(iop, iova, iova, size, IOMMU_WRITE))
44386+ return __FAIL(iop, i);
44387
44388- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44389- return __FAIL(ops, i);
44390+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44391+ return __FAIL(iop, i);
44392
44393 iova += SZ_1G;
44394 j++;
44395 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
44396 }
44397
44398- free_io_pgtable_ops(ops);
44399+ free_io_pgtable(iop);
44400 }
44401
44402 selftest_running = false;
44403diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
44404index 6436fe2..088c965 100644
44405--- a/drivers/iommu/io-pgtable.c
44406+++ b/drivers/iommu/io-pgtable.c
44407@@ -40,7 +40,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
44408 #endif
44409 };
44410
44411-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44412+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44413 struct io_pgtable_cfg *cfg,
44414 void *cookie)
44415 {
44416@@ -62,21 +62,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44417 iop->cookie = cookie;
44418 iop->cfg = *cfg;
44419
44420- return &iop->ops;
44421+ return iop;
44422 }
44423
44424 /*
44425 * It is the IOMMU driver's responsibility to ensure that the page table
44426 * is no longer accessible to the walker by this point.
44427 */
44428-void free_io_pgtable_ops(struct io_pgtable_ops *ops)
44429+void free_io_pgtable(struct io_pgtable *iop)
44430 {
44431- struct io_pgtable *iop;
44432-
44433- if (!ops)
44434+ if (!iop)
44435 return;
44436
44437- iop = container_of(ops, struct io_pgtable, ops);
44438 iop->cfg.tlb->tlb_flush_all(iop->cookie);
44439 io_pgtable_init_table[iop->fmt]->free(iop);
44440 }
44441diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
44442index 10e32f6..0b276c8 100644
44443--- a/drivers/iommu/io-pgtable.h
44444+++ b/drivers/iommu/io-pgtable.h
44445@@ -75,17 +75,18 @@ struct io_pgtable_cfg {
44446 * These functions map directly onto the iommu_ops member functions with
44447 * the same names.
44448 */
44449+struct io_pgtable;
44450 struct io_pgtable_ops {
44451- int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
44452+ int (*map)(struct io_pgtable *iop, unsigned long iova,
44453 phys_addr_t paddr, size_t size, int prot);
44454- int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
44455+ int (*unmap)(struct io_pgtable *iop, unsigned long iova,
44456 size_t size);
44457- phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
44458+ phys_addr_t (*iova_to_phys)(struct io_pgtable *iop,
44459 unsigned long iova);
44460 };
44461
44462 /**
44463- * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
44464+ * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU.
44465 *
44466 * @fmt: The page table format.
44467 * @cfg: The page table configuration. This will be modified to represent
44468@@ -94,9 +95,9 @@ struct io_pgtable_ops {
44469 * @cookie: An opaque token provided by the IOMMU driver and passed back to
44470 * the callback routines in cfg->tlb.
44471 */
44472-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44473- struct io_pgtable_cfg *cfg,
44474- void *cookie);
44475+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44476+ struct io_pgtable_cfg *cfg,
44477+ void *cookie);
44478
44479 /**
44480 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
44481@@ -105,7 +106,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44482 *
44483 * @ops: The ops returned from alloc_io_pgtable_ops.
44484 */
44485-void free_io_pgtable_ops(struct io_pgtable_ops *ops);
44486+void free_io_pgtable(struct io_pgtable *iop);
44487
44488
44489 /*
44490@@ -125,7 +126,7 @@ struct io_pgtable {
44491 enum io_pgtable_fmt fmt;
44492 void *cookie;
44493 struct io_pgtable_cfg cfg;
44494- struct io_pgtable_ops ops;
44495+ const struct io_pgtable_ops *ops;
44496 };
44497
44498 /**
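
The io-pgtable.h hunk above is the heart of this refactor: instead of embedding a writable io_pgtable_ops by value in every io_pgtable, each table holds a pointer to one shared const ops instance, and the callbacks take the table itself rather than the ops. With a single read-only vtable there is no per-object function-pointer data left to overwrite, and the io_pgtable_ops_to_pgtable container_of gymnastics removed earlier become unnecessary. A minimal model of the resulting shape, with illustrative names rather than the kernel's:

/* Minimal model of the refactor: one shared const vtable per format,
 * callbacks invoked as iop->ops->map(iop, ...). */
#include <stdio.h>

struct table;
struct table_ops { int (*map)(struct table *t, int iova); };

static int do_map(struct table *t, int iova) { (void)t; return iova; }

/* After the refactor: one read-only ops instance shared by every table. */
static const struct table_ops table_ops = { .map = do_map };

struct table { const struct table_ops *ops; };

int main(void)
{
	struct table t = { .ops = &table_ops };
	printf("%d\n", t.ops->map(&t, 42));
	return 0;
}
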
44499diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
44500index 72e683d..c9db262 100644
44501--- a/drivers/iommu/iommu.c
44502+++ b/drivers/iommu/iommu.c
44503@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
44504 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
44505 {
44506 int err;
44507- struct notifier_block *nb;
44508+ notifier_block_no_const *nb;
44509 struct iommu_callback_data cb = {
44510 .ops = ops,
44511 };
44512diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
44513index bc39bdf..e2de272 100644
44514--- a/drivers/iommu/ipmmu-vmsa.c
44515+++ b/drivers/iommu/ipmmu-vmsa.c
44516@@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain {
44517 struct iommu_domain *io_domain;
44518
44519 struct io_pgtable_cfg cfg;
44520- struct io_pgtable_ops *iop;
44521+ struct io_pgtable *iop;
44522
44523 unsigned int context_id;
44524 spinlock_t lock; /* Protects mappings */
44525@@ -323,8 +323,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
44526 domain->cfg.oas = 40;
44527 domain->cfg.tlb = &ipmmu_gather_ops;
44528
44529- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
44530- domain);
44531+ domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain);
44532 if (!domain->iop)
44533 return -EINVAL;
44534
44535@@ -482,7 +481,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
44536 * been detached.
44537 */
44538 ipmmu_domain_destroy_context(domain);
44539- free_io_pgtable_ops(domain->iop);
44540+ free_io_pgtable(domain->iop);
44541 kfree(domain);
44542 }
44543
44544@@ -551,7 +550,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
44545 if (!domain)
44546 return -ENODEV;
44547
44548- return domain->iop->map(domain->iop, iova, paddr, size, prot);
44549+ return domain->iop->ops->map(domain->iop, iova, paddr, size, prot);
44550 }
44551
44552 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44553@@ -559,7 +558,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44554 {
44555 struct ipmmu_vmsa_domain *domain = io_domain->priv;
44556
44557- return domain->iop->unmap(domain->iop, iova, size);
44558+ return domain->iop->ops->unmap(domain->iop, iova, size);
44559 }
44560
44561 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44562@@ -569,7 +568,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44563
44564 /* TODO: Is locking needed ? */
44565
44566- return domain->iop->iova_to_phys(domain->iop, iova);
44567+ return domain->iop->ops->iova_to_phys(domain->iop, iova);
44568 }
44569
44570 static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
44571diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44572index 390079e..1da9d6c 100644
44573--- a/drivers/iommu/irq_remapping.c
44574+++ b/drivers/iommu/irq_remapping.c
44575@@ -329,7 +329,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44576 void panic_if_irq_remap(const char *msg)
44577 {
44578 if (irq_remapping_enabled)
44579- panic(msg);
44580+ panic("%s", msg);
44581 }
44582
44583 static void ir_ack_apic_edge(struct irq_data *data)
44584@@ -350,10 +350,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44585
44586 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44587 {
44588- chip->irq_print_chip = ir_print_prefix;
44589- chip->irq_ack = ir_ack_apic_edge;
44590- chip->irq_eoi = ir_ack_apic_level;
44591- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44592+ pax_open_kernel();
44593+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44594+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44595+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44596+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44597+ pax_close_kernel();
44598 }
44599
44600 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
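
The first irq_remapping hunk above fixes a classic format-string bug: panic(msg) parses a caller-supplied string for conversions, so a message containing %s or %n would be interpreted against nonexistent varargs; panic("%s", msg) prints it literally. The second hunk is the same pax_open_kernel() write-bracket shown earlier, applied to a constified irq_chip. A tiny runnable demonstration of the format-string point, using printf as a stand-in for panic:

/* Why panic(msg) became panic("%s", msg). */
#include <stdio.h>

int main(void)
{
	const char *msg = "disk %s failed at %p";  /* text that happens to contain conversions */

	/* printf(msg); */      /* the bug pattern: would read bogus varargs */
	printf("%s\n", msg);    /* the fix: the text is printed literally */
	return 0;
}
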
44601diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44602index 471e1cd..b53b870 100644
44603--- a/drivers/irqchip/irq-gic.c
44604+++ b/drivers/irqchip/irq-gic.c
44605@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44606 * Supported arch specific GIC irq extension.
44607 * Default make them NULL.
44608 */
44609-struct irq_chip gic_arch_extn = {
44610+irq_chip_no_const gic_arch_extn = {
44611 .irq_eoi = NULL,
44612 .irq_mask = NULL,
44613 .irq_unmask = NULL,
44614@@ -318,7 +318,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44615 chained_irq_exit(chip, desc);
44616 }
44617
44618-static struct irq_chip gic_chip = {
44619+static irq_chip_no_const gic_chip __read_only = {
44620 .name = "GIC",
44621 .irq_mask = gic_mask_irq,
44622 .irq_unmask = gic_unmask_irq,
44623diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
44624index 9a0767b..5e5f86f 100644
44625--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
44626+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
44627@@ -373,7 +373,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
44628 struct intc_irqpin_iomem *i;
44629 struct resource *io[INTC_IRQPIN_REG_NR];
44630 struct resource *irq;
44631- struct irq_chip *irq_chip;
44632+ irq_chip_no_const *irq_chip;
44633 void (*enable_fn)(struct irq_data *d);
44634 void (*disable_fn)(struct irq_data *d);
44635 const char *name = dev_name(dev);
44636diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44637index 384e6ed..7a771b2 100644
44638--- a/drivers/irqchip/irq-renesas-irqc.c
44639+++ b/drivers/irqchip/irq-renesas-irqc.c
44640@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44641 struct irqc_priv *p;
44642 struct resource *io;
44643 struct resource *irq;
44644- struct irq_chip *irq_chip;
44645+ irq_chip_no_const *irq_chip;
44646 const char *name = dev_name(&pdev->dev);
44647 int ret;
44648 int k;
44649diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44650index 6a2df32..dc962f1 100644
44651--- a/drivers/isdn/capi/capi.c
44652+++ b/drivers/isdn/capi/capi.c
44653@@ -81,8 +81,8 @@ struct capiminor {
44654
44655 struct capi20_appl *ap;
44656 u32 ncci;
44657- atomic_t datahandle;
44658- atomic_t msgid;
44659+ atomic_unchecked_t datahandle;
44660+ atomic_unchecked_t msgid;
44661
44662 struct tty_port port;
44663 int ttyinstop;
44664@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44665 capimsg_setu16(s, 2, mp->ap->applid);
44666 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44667 capimsg_setu8 (s, 5, CAPI_RESP);
44668- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44669+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44670 capimsg_setu32(s, 8, mp->ncci);
44671 capimsg_setu16(s, 12, datahandle);
44672 }
44673@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44674 mp->outbytes -= len;
44675 spin_unlock_bh(&mp->outlock);
44676
44677- datahandle = atomic_inc_return(&mp->datahandle);
44678+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44679 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44680 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44681 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44682 capimsg_setu16(skb->data, 2, mp->ap->applid);
44683 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44684 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44685- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44686+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44687 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44688 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44689 capimsg_setu16(skb->data, 16, len); /* Data length */
44690diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44691index aecec6d..11e13c5 100644
44692--- a/drivers/isdn/gigaset/bas-gigaset.c
44693+++ b/drivers/isdn/gigaset/bas-gigaset.c
44694@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44695
44696
44697 static const struct gigaset_ops gigops = {
44698- gigaset_write_cmd,
44699- gigaset_write_room,
44700- gigaset_chars_in_buffer,
44701- gigaset_brkchars,
44702- gigaset_init_bchannel,
44703- gigaset_close_bchannel,
44704- gigaset_initbcshw,
44705- gigaset_freebcshw,
44706- gigaset_reinitbcshw,
44707- gigaset_initcshw,
44708- gigaset_freecshw,
44709- gigaset_set_modem_ctrl,
44710- gigaset_baud_rate,
44711- gigaset_set_line_ctrl,
44712- gigaset_isoc_send_skb,
44713- gigaset_isoc_input,
44714+ .write_cmd = gigaset_write_cmd,
44715+ .write_room = gigaset_write_room,
44716+ .chars_in_buffer = gigaset_chars_in_buffer,
44717+ .brkchars = gigaset_brkchars,
44718+ .init_bchannel = gigaset_init_bchannel,
44719+ .close_bchannel = gigaset_close_bchannel,
44720+ .initbcshw = gigaset_initbcshw,
44721+ .freebcshw = gigaset_freebcshw,
44722+ .reinitbcshw = gigaset_reinitbcshw,
44723+ .initcshw = gigaset_initcshw,
44724+ .freecshw = gigaset_freecshw,
44725+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44726+ .baud_rate = gigaset_baud_rate,
44727+ .set_line_ctrl = gigaset_set_line_ctrl,
44728+ .send_skb = gigaset_isoc_send_skb,
44729+ .handle_input = gigaset_isoc_input,
44730 };
44731
44732 /* bas_gigaset_init
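
The gigops rewrite above converts a positional struct initializer to C99 designated initializers. Beyond readability, this is what allows grsecurity's structure-layout randomization to reorder struct gigaset_ops fields at build time: positional initializers silently bind values to the wrong members once the layout moves, while designated ones stay correct. The same conversion is applied to the other gigaset and isdn ops tables below. A short runnable illustration:

/* Positional vs designated initializers: only the designated form
 * survives a reordering of the struct's members. */
#include <stdio.h>

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int my_open(void)  { return puts("open");  }
static int my_close(void) { return puts("close"); }

static const struct ops positional = { my_open, my_close };  /* layout-dependent */
static const struct ops designated = {                       /* layout-proof */
	.open  = my_open,
	.close = my_close,
};

int main(void)
{
	positional.open();
	designated.close();
	return 0;
}
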
44733diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
44734index 600c79b..3752bab 100644
44735--- a/drivers/isdn/gigaset/interface.c
44736+++ b/drivers/isdn/gigaset/interface.c
44737@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
44738 }
44739 tty->driver_data = cs;
44740
44741- ++cs->port.count;
44742+ atomic_inc(&cs->port.count);
44743
44744- if (cs->port.count == 1) {
44745+ if (atomic_read(&cs->port.count) == 1) {
44746 tty_port_tty_set(&cs->port, tty);
44747 cs->port.low_latency = 1;
44748 }
44749@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
44750
44751 if (!cs->connected)
44752 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
44753- else if (!cs->port.count)
44754+ else if (!atomic_read(&cs->port.count))
44755 dev_warn(cs->dev, "%s: device not opened\n", __func__);
44756- else if (!--cs->port.count)
44757+ else if (!atomic_dec_return(&cs->port.count))
44758 tty_port_tty_set(&cs->port, NULL);
44759
44760 mutex_unlock(&cs->mutex);
44761diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
44762index 8c91fd5..14f13ce 100644
44763--- a/drivers/isdn/gigaset/ser-gigaset.c
44764+++ b/drivers/isdn/gigaset/ser-gigaset.c
44765@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
44766 }
44767
44768 static const struct gigaset_ops ops = {
44769- gigaset_write_cmd,
44770- gigaset_write_room,
44771- gigaset_chars_in_buffer,
44772- gigaset_brkchars,
44773- gigaset_init_bchannel,
44774- gigaset_close_bchannel,
44775- gigaset_initbcshw,
44776- gigaset_freebcshw,
44777- gigaset_reinitbcshw,
44778- gigaset_initcshw,
44779- gigaset_freecshw,
44780- gigaset_set_modem_ctrl,
44781- gigaset_baud_rate,
44782- gigaset_set_line_ctrl,
44783- gigaset_m10x_send_skb, /* asyncdata.c */
44784- gigaset_m10x_input, /* asyncdata.c */
44785+ .write_cmd = gigaset_write_cmd,
44786+ .write_room = gigaset_write_room,
44787+ .chars_in_buffer = gigaset_chars_in_buffer,
44788+ .brkchars = gigaset_brkchars,
44789+ .init_bchannel = gigaset_init_bchannel,
44790+ .close_bchannel = gigaset_close_bchannel,
44791+ .initbcshw = gigaset_initbcshw,
44792+ .freebcshw = gigaset_freebcshw,
44793+ .reinitbcshw = gigaset_reinitbcshw,
44794+ .initcshw = gigaset_initcshw,
44795+ .freecshw = gigaset_freecshw,
44796+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44797+ .baud_rate = gigaset_baud_rate,
44798+ .set_line_ctrl = gigaset_set_line_ctrl,
44799+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
44800+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
44801 };
44802
44803
44804diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
44805index 5f306e2..5342f88 100644
44806--- a/drivers/isdn/gigaset/usb-gigaset.c
44807+++ b/drivers/isdn/gigaset/usb-gigaset.c
44808@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
44809 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
44810 memcpy(cs->hw.usb->bchars, buf, 6);
44811 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
44812- 0, 0, &buf, 6, 2000);
44813+ 0, 0, buf, 6, 2000);
44814 }
44815
44816 static void gigaset_freebcshw(struct bc_state *bcs)
44817@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
44818 }
44819
44820 static const struct gigaset_ops ops = {
44821- gigaset_write_cmd,
44822- gigaset_write_room,
44823- gigaset_chars_in_buffer,
44824- gigaset_brkchars,
44825- gigaset_init_bchannel,
44826- gigaset_close_bchannel,
44827- gigaset_initbcshw,
44828- gigaset_freebcshw,
44829- gigaset_reinitbcshw,
44830- gigaset_initcshw,
44831- gigaset_freecshw,
44832- gigaset_set_modem_ctrl,
44833- gigaset_baud_rate,
44834- gigaset_set_line_ctrl,
44835- gigaset_m10x_send_skb,
44836- gigaset_m10x_input,
44837+ .write_cmd = gigaset_write_cmd,
44838+ .write_room = gigaset_write_room,
44839+ .chars_in_buffer = gigaset_chars_in_buffer,
44840+ .brkchars = gigaset_brkchars,
44841+ .init_bchannel = gigaset_init_bchannel,
44842+ .close_bchannel = gigaset_close_bchannel,
44843+ .initbcshw = gigaset_initbcshw,
44844+ .freebcshw = gigaset_freebcshw,
44845+ .reinitbcshw = gigaset_reinitbcshw,
44846+ .initcshw = gigaset_initcshw,
44847+ .freecshw = gigaset_freecshw,
44848+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44849+ .baud_rate = gigaset_baud_rate,
44850+ .set_line_ctrl = gigaset_set_line_ctrl,
44851+ .send_skb = gigaset_m10x_send_skb,
44852+ .handle_input = gigaset_m10x_input,
44853 };
44854
44855 /*
44856diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
44857index 4d9b195..455075c 100644
44858--- a/drivers/isdn/hardware/avm/b1.c
44859+++ b/drivers/isdn/hardware/avm/b1.c
44860@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
44861 }
44862 if (left) {
44863 if (t4file->user) {
44864- if (copy_from_user(buf, dp, left))
44865+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44866 return -EFAULT;
44867 } else {
44868 memcpy(buf, dp, left);
44869@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
44870 }
44871 if (left) {
44872 if (config->user) {
44873- if (copy_from_user(buf, dp, left))
44874+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44875 return -EFAULT;
44876 } else {
44877 memcpy(buf, dp, left);
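
Both b1.c hunks above copy a caller-controlled number of bytes (left) into a fixed on-stack buffer; rejecting left > sizeof buf before copy_from_user() closes a kernel stack overflow. A runnable userland sketch of the shape of the fix, with memcpy standing in for copy_from_user:

/* Clamp-or-reject a user-supplied length against the destination
 * before copying, as the added checks do. */
#include <stdio.h>
#include <string.h>

#define EFAULT 14

static int load_chunk(const char *src, size_t left)
{
	char buf[256];

	if (left > sizeof buf)          /* the added check */
		return -EFAULT;
	memcpy(buf, src, left);         /* kernel: copy_from_user(buf, dp, left) */
	return 0;
}

int main(void)
{
	char big[512] = { 0 };

	printf("ok: %d\n",  load_chunk(big, 100));
	printf("rej: %d\n", load_chunk(big, 400));  /* would have smashed buf */
	return 0;
}
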
44878diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
44879index 9b856e1..fa03c92 100644
44880--- a/drivers/isdn/i4l/isdn_common.c
44881+++ b/drivers/isdn/i4l/isdn_common.c
44882@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
44883 } else
44884 return -EINVAL;
44885 case IIOCDBGVAR:
44886+ if (!capable(CAP_SYS_RAWIO))
44887+ return -EPERM;
44888 if (arg) {
44889 if (copy_to_user(argp, &dev, sizeof(ulong)))
44890 return -EFAULT;
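
The IIOCDBGVAR hunk above gates an ioctl that copies the kernel address of the isdn dev object to user space; such a leak defeats kernel address-space randomization, so the patch requires CAP_SYS_RAWIO first. A runnable userland model of the gating pattern, where has_cap_sys_rawio() is a hypothetical stand-in for the kernel's capable(CAP_SYS_RAWIO):

/* Refuse to disclose a kernel pointer to callers lacking the
 * raw-I/O privilege. */
#include <stdio.h>

#define EPERM 1

static int has_cap_sys_rawio(void) { return 0; }   /* pretend: unprivileged */

static int dbgvar_ioctl(unsigned long *out)
{
	static int dev;                 /* stands in for the isdn dev object */

	if (!has_cap_sys_rawio())
		return -EPERM;          /* the added check */
	*out = (unsigned long)&dev;     /* the address disclosure being gated */
	return 0;
}

int main(void)
{
	unsigned long addr;

	printf("ret = %d\n", dbgvar_ioctl(&addr));  /* -1 == -EPERM here */
	return 0;
}
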
44891diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
44892index 91d5730..336523e 100644
44893--- a/drivers/isdn/i4l/isdn_concap.c
44894+++ b/drivers/isdn/i4l/isdn_concap.c
44895@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
44896 }
44897
44898 struct concap_device_ops isdn_concap_reliable_dl_dops = {
44899- &isdn_concap_dl_data_req,
44900- &isdn_concap_dl_connect_req,
44901- &isdn_concap_dl_disconn_req
44902+ .data_req = &isdn_concap_dl_data_req,
44903+ .connect_req = &isdn_concap_dl_connect_req,
44904+ .disconn_req = &isdn_concap_dl_disconn_req
44905 };
44906
44907 /* The following should better go into a dedicated source file such that
44908diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
44909index bc91261..2ef7e36 100644
44910--- a/drivers/isdn/i4l/isdn_tty.c
44911+++ b/drivers/isdn/i4l/isdn_tty.c
44912@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
44913
44914 #ifdef ISDN_DEBUG_MODEM_OPEN
44915 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
44916- port->count);
44917+ atomic_read(&port->count));
44918 #endif
44919- port->count++;
44920+ atomic_inc(&port->count);
44921 port->tty = tty;
44922 /*
44923 * Start up serial port
44924@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44925 #endif
44926 return;
44927 }
44928- if ((tty->count == 1) && (port->count != 1)) {
44929+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
44930 /*
44931 * Uh, oh. tty->count is 1, which means that the tty
44932 * structure will be freed. Info->count should always
44933@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44934 * serial port won't be shutdown.
44935 */
44936 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
44937- "info->count is %d\n", port->count);
44938- port->count = 1;
44939+ "info->count is %d\n", atomic_read(&port->count));
44940+ atomic_set(&port->count, 1);
44941 }
44942- if (--port->count < 0) {
44943+ if (atomic_dec_return(&port->count) < 0) {
44944 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
44945- info->line, port->count);
44946- port->count = 0;
44947+ info->line, atomic_read(&port->count));
44948+ atomic_set(&port->count, 0);
44949 }
44950- if (port->count) {
44951+ if (atomic_read(&port->count)) {
44952 #ifdef ISDN_DEBUG_MODEM_OPEN
44953 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
44954 #endif
44955@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
44956 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
44957 return;
44958 isdn_tty_shutdown(info);
44959- port->count = 0;
44960+ atomic_set(&port->count, 0);
44961 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44962 port->tty = NULL;
44963 wake_up_interruptible(&port->open_wait);
44964@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
44965 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
44966 modem_info *info = &dev->mdm.info[i];
44967
44968- if (info->port.count == 0)
44969+ if (atomic_read(&info->port.count) == 0)
44970 continue;
44971 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
44972 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
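
The isdn_tty and gigaset interface hunks above convert the tty port use count from a plain int to atomic_t, so concurrent opens, closes and hangups cannot race their increments and decrements; note how atomic_dec_return() folds the decrement and the "was that the last reference?" test into one indivisible step. A runnable userland model using C11 atomics:

/* Model of the port.count conversion: decrement and zero test become
 * one atomic operation. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void port_open(void)
{
	atomic_fetch_add(&port_count, 1);       /* atomic_inc(&port->count) */
}

static void port_close(void)
{
	/* atomic_dec_return(&port->count): fetch_sub returns the old value */
	if (atomic_fetch_sub(&port_count, 1) - 1 <= 0)
		puts("last close: drop the tty");
}

int main(void)
{
	port_open();
	port_open();
	port_close();   /* still open */
	port_close();   /* last close fires */
	return 0;
}
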
44973diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
44974index e2d4e58..40cd045 100644
44975--- a/drivers/isdn/i4l/isdn_x25iface.c
44976+++ b/drivers/isdn/i4l/isdn_x25iface.c
44977@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
44978
44979
44980 static struct concap_proto_ops ix25_pops = {
44981- &isdn_x25iface_proto_new,
44982- &isdn_x25iface_proto_del,
44983- &isdn_x25iface_proto_restart,
44984- &isdn_x25iface_proto_close,
44985- &isdn_x25iface_xmit,
44986- &isdn_x25iface_receive,
44987- &isdn_x25iface_connect_ind,
44988- &isdn_x25iface_disconn_ind
44989+ .proto_new = &isdn_x25iface_proto_new,
44990+ .proto_del = &isdn_x25iface_proto_del,
44991+ .restart = &isdn_x25iface_proto_restart,
44992+ .close = &isdn_x25iface_proto_close,
44993+ .encap_and_xmit = &isdn_x25iface_xmit,
44994+ .data_ind = &isdn_x25iface_receive,
44995+ .connect_ind = &isdn_x25iface_connect_ind,
44996+ .disconn_ind = &isdn_x25iface_disconn_ind
44997 };
44998
44999 /* error message helper function */
45000diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45001index 358a574..b4987ea 100644
45002--- a/drivers/isdn/icn/icn.c
45003+++ b/drivers/isdn/icn/icn.c
45004@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45005 if (count > len)
45006 count = len;
45007 if (user) {
45008- if (copy_from_user(msg, buf, count))
45009+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45010 return -EFAULT;
45011 } else
45012 memcpy(msg, buf, count);
45013diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45014index 87f7dff..7300125 100644
45015--- a/drivers/isdn/mISDN/dsp_cmx.c
45016+++ b/drivers/isdn/mISDN/dsp_cmx.c
45017@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45018 static u16 dsp_count; /* last sample count */
45019 static int dsp_count_valid; /* if we have last sample count */
45020
45021-void
45022+void __intentional_overflow(-1)
45023 dsp_cmx_send(void *arg)
45024 {
45025 struct dsp_conf *conf;
45026diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45027index 0f9ed1e..2715d6f 100644
45028--- a/drivers/leds/leds-clevo-mail.c
45029+++ b/drivers/leds/leds-clevo-mail.c
45030@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45031 * detected as working, but in reality it is not) as low as
45032 * possible.
45033 */
45034-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45035+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45036 {
45037 .callback = clevo_mail_led_dmi_callback,
45038 .ident = "Clevo D410J",
45039diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45040index 046cb70..6b20d39 100644
45041--- a/drivers/leds/leds-ss4200.c
45042+++ b/drivers/leds/leds-ss4200.c
45043@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45044 * detected as working, but in reality it is not) as low as
45045 * possible.
45046 */
45047-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45048+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45049 {
45050 .callback = ss4200_led_dmi_callback,
45051 .ident = "Intel SS4200-E",
45052diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45053index 7dc93aa..8272379 100644
45054--- a/drivers/lguest/core.c
45055+++ b/drivers/lguest/core.c
45056@@ -96,9 +96,17 @@ static __init int map_switcher(void)
45057 * The end address needs +1 because __get_vm_area allocates an
45058 * extra guard page, so we need space for that.
45059 */
45060+
45061+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45062+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45063+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45064+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45065+#else
45066 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45067 VM_ALLOC, switcher_addr, switcher_addr
45068 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45069+#endif
45070+
45071 if (!switcher_vma) {
45072 err = -ENOMEM;
45073 printk("lguest: could not map switcher pages high\n");
45074@@ -121,7 +129,7 @@ static __init int map_switcher(void)
45075 * Now the Switcher is mapped at the right address, we can't fail!
45076 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45077 */
45078- memcpy(switcher_vma->addr, start_switcher_text,
45079+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45080 end_switcher_text - start_switcher_text);
45081
45082 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45083diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45084index e3abebc9..6a35328 100644
45085--- a/drivers/lguest/page_tables.c
45086+++ b/drivers/lguest/page_tables.c
45087@@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45088 /*:*/
45089
45090 #ifdef CONFIG_X86_PAE
45091-static void release_pmd(pmd_t *spmd)
45092+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45093 {
45094 /* If the entry's not present, there's nothing to release. */
45095 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45096diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45097index 30f2aef..391c748 100644
45098--- a/drivers/lguest/x86/core.c
45099+++ b/drivers/lguest/x86/core.c
45100@@ -60,7 +60,7 @@ static struct {
45101 /* Offset from where switcher.S was compiled to where we've copied it */
45102 static unsigned long switcher_offset(void)
45103 {
45104- return switcher_addr - (unsigned long)start_switcher_text;
45105+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45106 }
45107
45108 /* This cpu's struct lguest_pages (after the Switcher text page) */
45109@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45110 * These copies are pretty cheap, so we do them unconditionally: */
45111 /* Save the current Host top-level page directory.
45112 */
45113+
45114+#ifdef CONFIG_PAX_PER_CPU_PGD
45115+ pages->state.host_cr3 = read_cr3();
45116+#else
45117 pages->state.host_cr3 = __pa(current->mm->pgd);
45118+#endif
45119+
45120 /*
45121 * Set up the Guest's page tables to see this CPU's pages (and no
45122 * other CPU's pages).
45123@@ -494,7 +500,7 @@ void __init lguest_arch_host_init(void)
45124 * compiled-in switcher code and the high-mapped copy we just made.
45125 */
45126 for (i = 0; i < IDT_ENTRIES; i++)
45127- default_idt_entries[i] += switcher_offset();
45128+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45129
45130 /*
45131 * Set up the Switcher's per-cpu areas.
45132@@ -577,7 +583,7 @@ void __init lguest_arch_host_init(void)
45133 * it will be undisturbed when we switch. To change %cs and jump we
45134 * need this structure to feed to Intel's "lcall" instruction.
45135 */
45136- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45137+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45138 lguest_entry.segment = LGUEST_CS;
45139
45140 /*
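
The ktla_ktva() calls sprinkled through the lguest hunks above account for PaX KERNEXEC on i386, under which kernel text symbols and the linear mapping of that text differ by a constant bias; any memcpy or offset arithmetic on code must first normalise the address to the view the data actually lives at. A heavily hedged toy model: the bias value and even its direction here are illustrative only, the real value is configuration-specific, and the point is merely that the two views differ by a constant that round-trips.

/* Toy model of the ktla_ktva()/ktva_ktla() pair. */
#include <stdio.h>

#define KERNEXEC_TEXT_BIAS 0x10000000UL    /* illustrative constant only */

static unsigned long ktla_ktva_model(unsigned long a) { return a + KERNEXEC_TEXT_BIAS; }
static unsigned long ktva_ktla_model(unsigned long a) { return a - KERNEXEC_TEXT_BIAS; }

int main(void)
{
	unsigned long sym = 0xc1000000UL;      /* pretend text symbol address */

	printf("round trip ok: %d\n",
	       ktva_ktla_model(ktla_ktva_model(sym)) == sym);
	return 0;
}
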
45141diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45142index 40634b0..4f5855e 100644
45143--- a/drivers/lguest/x86/switcher_32.S
45144+++ b/drivers/lguest/x86/switcher_32.S
45145@@ -87,6 +87,7 @@
45146 #include <asm/page.h>
45147 #include <asm/segment.h>
45148 #include <asm/lguest.h>
45149+#include <asm/processor-flags.h>
45150
45151 // We mark the start of the code to copy
45152 // It's placed in .text tho it's never run here
45153@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45154 // Changes type when we load it: damn Intel!
45155 // For after we switch over our page tables
45156 // That entry will be read-only: we'd crash.
45157+
45158+#ifdef CONFIG_PAX_KERNEXEC
45159+ mov %cr0, %edx
45160+ xor $X86_CR0_WP, %edx
45161+ mov %edx, %cr0
45162+#endif
45163+
45164 movl $(GDT_ENTRY_TSS*8), %edx
45165 ltr %dx
45166
45167@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45168 // Let's clear it again for our return.
45169 // The GDT descriptor of the Host
45170 // Points to the table after two "size" bytes
45171- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45172+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45173 // Clear "used" from type field (byte 5, bit 2)
45174- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45175+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45176+
45177+#ifdef CONFIG_PAX_KERNEXEC
45178+ mov %cr0, %eax
45179+ xor $X86_CR0_WP, %eax
45180+ mov %eax, %cr0
45181+#endif
45182
45183 // Once our page table's switched, the Guest is live!
45184 // The Host fades as we run this final step.
45185@@ -295,13 +309,12 @@ deliver_to_host:
45186 // I consulted gcc, and it gave
45187 // These instructions, which I gladly credit:
45188 leal (%edx,%ebx,8), %eax
45189- movzwl (%eax),%edx
45190- movl 4(%eax), %eax
45191- xorw %ax, %ax
45192- orl %eax, %edx
45193+ movl 4(%eax), %edx
45194+ movw (%eax), %dx
45195 // Now the address of the handler's in %edx
45196 // We call it now: its "iret" drops us home.
45197- jmp *%edx
45198+ ljmp $__KERNEL_CS, $1f
45199+1: jmp *%edx
45200
45201 // Every interrupt can come to us here
45202 // But we must truly tell each apart.
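
The switcher_32.S hunks above bracket the GDT "busy TSS" byte pokes with code that flips CR0.WP, because under KERNEXEC those GDT bytes sit in read-only memory; while WP is clear, ring 0 may write through read-only mappings, and the same xor on entry and exit restores the flag. A C rendering of that bracket, where read_cr0()/write_cr0() stand for the kernel's privileged helpers; this is a kernel-context sketch and cannot run in userland:

/* What the CR0.WP bracket does, in C form (the x86 pax_open_kernel()
 * mechanism). */
#define X86_CR0_WP (1UL << 16)

extern unsigned long read_cr0(void);
extern void write_cr0(unsigned long val);

static void poke_gdt_byte(unsigned char *p, unsigned char mask)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 ^ X86_CR0_WP);    /* open: WP off, RO pages writable */
	*p &= mask;                     /* clear the TSS "busy" type bit */
	write_cr0(cr0);                 /* close: restore WP */
}
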
45203diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45204index a08e3ee..df8ade2 100644
45205--- a/drivers/md/bcache/closure.h
45206+++ b/drivers/md/bcache/closure.h
45207@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45208 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45209 struct workqueue_struct *wq)
45210 {
45211- BUG_ON(object_is_on_stack(cl));
45212+ BUG_ON(object_starts_on_stack(cl));
45213 closure_set_ip(cl);
45214 cl->fn = fn;
45215 cl->wq = wq;
45216diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45217index 3a57679..c58cdaf 100644
45218--- a/drivers/md/bitmap.c
45219+++ b/drivers/md/bitmap.c
45220@@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45221 chunk_kb ? "KB" : "B");
45222 if (bitmap->storage.file) {
45223 seq_printf(seq, ", file: ");
45224- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45225+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45226 }
45227
45228 seq_printf(seq, "\n");
45229diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45230index c8a18e4..0ab43e5 100644
45231--- a/drivers/md/dm-ioctl.c
45232+++ b/drivers/md/dm-ioctl.c
45233@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45234 cmd == DM_LIST_VERSIONS_CMD)
45235 return 0;
45236
45237- if ((cmd == DM_DEV_CREATE_CMD)) {
45238+ if (cmd == DM_DEV_CREATE_CMD) {
45239 if (!*param->name) {
45240 DMWARN("name not supplied when creating device");
45241 return -EINVAL;
45242diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45243index 089d627..ef7352e 100644
45244--- a/drivers/md/dm-raid1.c
45245+++ b/drivers/md/dm-raid1.c
45246@@ -40,7 +40,7 @@ enum dm_raid1_error {
45247
45248 struct mirror {
45249 struct mirror_set *ms;
45250- atomic_t error_count;
45251+ atomic_unchecked_t error_count;
45252 unsigned long error_type;
45253 struct dm_dev *dev;
45254 sector_t offset;
45255@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45256 struct mirror *m;
45257
45258 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45259- if (!atomic_read(&m->error_count))
45260+ if (!atomic_read_unchecked(&m->error_count))
45261 return m;
45262
45263 return NULL;
45264@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45265 * simple way to tell if a device has encountered
45266 * errors.
45267 */
45268- atomic_inc(&m->error_count);
45269+ atomic_inc_unchecked(&m->error_count);
45270
45271 if (test_and_set_bit(error_type, &m->error_type))
45272 return;
45273@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45274 struct mirror *m = get_default_mirror(ms);
45275
45276 do {
45277- if (likely(!atomic_read(&m->error_count)))
45278+ if (likely(!atomic_read_unchecked(&m->error_count)))
45279 return m;
45280
45281 if (m-- == ms->mirror)
45282@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45283 {
45284 struct mirror *default_mirror = get_default_mirror(m->ms);
45285
45286- return !atomic_read(&default_mirror->error_count);
45287+ return !atomic_read_unchecked(&default_mirror->error_count);
45288 }
45289
45290 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45291@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45292 */
45293 if (likely(region_in_sync(ms, region, 1)))
45294 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45295- else if (m && atomic_read(&m->error_count))
45296+ else if (m && atomic_read_unchecked(&m->error_count))
45297 m = NULL;
45298
45299 if (likely(m))
45300@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45301 }
45302
45303 ms->mirror[mirror].ms = ms;
45304- atomic_set(&(ms->mirror[mirror].error_count), 0);
45305+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45306 ms->mirror[mirror].error_type = 0;
45307 ms->mirror[mirror].offset = offset;
45308
45309@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
45310 */
45311 static char device_status_char(struct mirror *m)
45312 {
45313- if (!atomic_read(&(m->error_count)))
45314+ if (!atomic_read_unchecked(&(m->error_count)))
45315 return 'A';
45316
45317 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45318diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45319index f478a4c..4b8e5ef 100644
45320--- a/drivers/md/dm-stats.c
45321+++ b/drivers/md/dm-stats.c
45322@@ -382,7 +382,7 @@ do_sync_free:
45323 synchronize_rcu_expedited();
45324 dm_stat_free(&s->rcu_head);
45325 } else {
45326- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45327+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45328 call_rcu(&s->rcu_head, dm_stat_free);
45329 }
45330 return 0;
45331@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45332 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45333 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45334 ));
45335- ACCESS_ONCE(last->last_sector) = end_sector;
45336- ACCESS_ONCE(last->last_rw) = bi_rw;
45337+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45338+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45339 }
45340
45341 rcu_read_lock();
45342diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45343index f8b37d4..5c5cafd 100644
45344--- a/drivers/md/dm-stripe.c
45345+++ b/drivers/md/dm-stripe.c
45346@@ -21,7 +21,7 @@ struct stripe {
45347 struct dm_dev *dev;
45348 sector_t physical_start;
45349
45350- atomic_t error_count;
45351+ atomic_unchecked_t error_count;
45352 };
45353
45354 struct stripe_c {
45355@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45356 kfree(sc);
45357 return r;
45358 }
45359- atomic_set(&(sc->stripe[i].error_count), 0);
45360+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45361 }
45362
45363 ti->private = sc;
45364@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45365 DMEMIT("%d ", sc->stripes);
45366 for (i = 0; i < sc->stripes; i++) {
45367 DMEMIT("%s ", sc->stripe[i].dev->name);
45368- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45369+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45370 'D' : 'A';
45371 }
45372 buffer[i] = '\0';
45373@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45374 */
45375 for (i = 0; i < sc->stripes; i++)
45376 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45377- atomic_inc(&(sc->stripe[i].error_count));
45378- if (atomic_read(&(sc->stripe[i].error_count)) <
45379+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45380+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45381 DM_IO_ERROR_THRESHOLD)
45382 schedule_work(&sc->trigger_event);
45383 }
45384diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45385index 6554d91..b0221c2 100644
45386--- a/drivers/md/dm-table.c
45387+++ b/drivers/md/dm-table.c
45388@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45389 if (!dev_size)
45390 return 0;
45391
45392- if ((start >= dev_size) || (start + len > dev_size)) {
45393+ if ((start >= dev_size) || (len > dev_size - start)) {
45394 DMWARN("%s: %s too small for target: "
45395 "start=%llu, len=%llu, dev_size=%llu",
45396 dm_device_name(ti->table->md), bdevname(bdev, b),
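
The dm-table hunk above rearranges a range check to dodge integer overflow: with 64-bit sector counts, start + len can wrap and compare "small", letting an out-of-range target slip through, whereas len > dev_size - start (after the start >= dev_size guard) keeps every operand in range. A runnable demonstration:

/* Why the check was rewritten: the old form wraps, the new one cannot. */
#include <stdint.h>
#include <stdio.h>

static int invalid_old(uint64_t start, uint64_t len, uint64_t dev_size)
{
	return (start >= dev_size) || (start + len > dev_size);   /* can wrap */
}

static int invalid_new(uint64_t start, uint64_t len, uint64_t dev_size)
{
	return (start >= dev_size) || (len > dev_size - start);   /* safe */
}

int main(void)
{
	uint64_t dev = 1000, start = 999, len = UINT64_MAX;  /* start + len wraps */

	printf("old: %d (overflow slips through)\n", invalid_old(start, len, dev));
	printf("new: %d (rejected)\n", invalid_new(start, len, dev));
	return 0;
}
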
45397diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45398index 79f6941..b33b4e0 100644
45399--- a/drivers/md/dm-thin-metadata.c
45400+++ b/drivers/md/dm-thin-metadata.c
45401@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45402 {
45403 pmd->info.tm = pmd->tm;
45404 pmd->info.levels = 2;
45405- pmd->info.value_type.context = pmd->data_sm;
45406+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45407 pmd->info.value_type.size = sizeof(__le64);
45408 pmd->info.value_type.inc = data_block_inc;
45409 pmd->info.value_type.dec = data_block_dec;
45410@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45411
45412 pmd->bl_info.tm = pmd->tm;
45413 pmd->bl_info.levels = 1;
45414- pmd->bl_info.value_type.context = pmd->data_sm;
45415+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45416 pmd->bl_info.value_type.size = sizeof(__le64);
45417 pmd->bl_info.value_type.inc = data_block_inc;
45418 pmd->bl_info.value_type.dec = data_block_dec;
45419diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45420index 8001fe9..abdd0d0 100644
45421--- a/drivers/md/dm.c
45422+++ b/drivers/md/dm.c
45423@@ -188,9 +188,9 @@ struct mapped_device {
45424 /*
45425 * Event handling.
45426 */
45427- atomic_t event_nr;
45428+ atomic_unchecked_t event_nr;
45429 wait_queue_head_t eventq;
45430- atomic_t uevent_seq;
45431+ atomic_unchecked_t uevent_seq;
45432 struct list_head uevent_list;
45433 spinlock_t uevent_lock; /* Protect access to uevent_list */
45434
45435@@ -2163,8 +2163,8 @@ static struct mapped_device *alloc_dev(int minor)
45436 spin_lock_init(&md->deferred_lock);
45437 atomic_set(&md->holders, 1);
45438 atomic_set(&md->open_count, 0);
45439- atomic_set(&md->event_nr, 0);
45440- atomic_set(&md->uevent_seq, 0);
45441+ atomic_set_unchecked(&md->event_nr, 0);
45442+ atomic_set_unchecked(&md->uevent_seq, 0);
45443 INIT_LIST_HEAD(&md->uevent_list);
45444 INIT_LIST_HEAD(&md->table_devices);
45445 spin_lock_init(&md->uevent_lock);
45446@@ -2329,7 +2329,7 @@ static void event_callback(void *context)
45447
45448 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
45449
45450- atomic_inc(&md->event_nr);
45451+ atomic_inc_unchecked(&md->event_nr);
45452 wake_up(&md->eventq);
45453 }
45454
45455@@ -3175,18 +3175,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
45456
45457 uint32_t dm_next_uevent_seq(struct mapped_device *md)
45458 {
45459- return atomic_add_return(1, &md->uevent_seq);
45460+ return atomic_add_return_unchecked(1, &md->uevent_seq);
45461 }
45462
45463 uint32_t dm_get_event_nr(struct mapped_device *md)
45464 {
45465- return atomic_read(&md->event_nr);
45466+ return atomic_read_unchecked(&md->event_nr);
45467 }
45468
45469 int dm_wait_event(struct mapped_device *md, int event_nr)
45470 {
45471 return wait_event_interruptible(md->eventq,
45472- (event_nr != atomic_read(&md->event_nr)));
45473+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45474 }
45475
45476 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45477diff --git a/drivers/md/md.c b/drivers/md/md.c
45478index e47d1dd..ebc3480 100644
45479--- a/drivers/md/md.c
45480+++ b/drivers/md/md.c
45481@@ -191,10 +191,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45482 * start build, activate spare
45483 */
45484 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45485-static atomic_t md_event_count;
45486+static atomic_unchecked_t md_event_count;
45487 void md_new_event(struct mddev *mddev)
45488 {
45489- atomic_inc(&md_event_count);
45490+ atomic_inc_unchecked(&md_event_count);
45491 wake_up(&md_event_waiters);
45492 }
45493 EXPORT_SYMBOL_GPL(md_new_event);
45494@@ -204,7 +204,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45495 */
45496 static void md_new_event_inintr(struct mddev *mddev)
45497 {
45498- atomic_inc(&md_event_count);
45499+ atomic_inc_unchecked(&md_event_count);
45500 wake_up(&md_event_waiters);
45501 }
45502
45503@@ -1442,7 +1442,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45504 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45505 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45506 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45507- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45508+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45509
45510 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45511 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45512@@ -1693,7 +1693,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45513 else
45514 sb->resync_offset = cpu_to_le64(0);
45515
45516- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45517+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45518
45519 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45520 sb->size = cpu_to_le64(mddev->dev_sectors);
45521@@ -2564,7 +2564,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
45522 static ssize_t
45523 errors_show(struct md_rdev *rdev, char *page)
45524 {
45525- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45526+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45527 }
45528
45529 static ssize_t
45530@@ -2573,7 +2573,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45531 char *e;
45532 unsigned long n = simple_strtoul(buf, &e, 10);
45533 if (*buf && (*e == 0 || *e == '\n')) {
45534- atomic_set(&rdev->corrected_errors, n);
45535+ atomic_set_unchecked(&rdev->corrected_errors, n);
45536 return len;
45537 }
45538 return -EINVAL;
45539@@ -3009,8 +3009,8 @@ int md_rdev_init(struct md_rdev *rdev)
45540 rdev->sb_loaded = 0;
45541 rdev->bb_page = NULL;
45542 atomic_set(&rdev->nr_pending, 0);
45543- atomic_set(&rdev->read_errors, 0);
45544- atomic_set(&rdev->corrected_errors, 0);
45545+ atomic_set_unchecked(&rdev->read_errors, 0);
45546+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45547
45548 INIT_LIST_HEAD(&rdev->same_set);
45549 init_waitqueue_head(&rdev->blocked_wait);
45550@@ -7083,7 +7083,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45551
45552 spin_unlock(&pers_lock);
45553 seq_printf(seq, "\n");
45554- seq->poll_event = atomic_read(&md_event_count);
45555+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45556 return 0;
45557 }
45558 if (v == (void*)2) {
45559@@ -7186,7 +7186,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45560 return error;
45561
45562 seq = file->private_data;
45563- seq->poll_event = atomic_read(&md_event_count);
45564+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45565 return error;
45566 }
45567
45568@@ -7203,7 +7203,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45569 /* always allow read */
45570 mask = POLLIN | POLLRDNORM;
45571
45572- if (seq->poll_event != atomic_read(&md_event_count))
45573+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45574 mask |= POLLERR | POLLPRI;
45575 return mask;
45576 }
45577@@ -7250,7 +7250,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45578 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45579 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45580 (int)part_stat_read(&disk->part0, sectors[1]) -
45581- atomic_read(&disk->sync_io);
45582+ atomic_read_unchecked(&disk->sync_io);
45583 /* sync IO will cause sync_io to increase before the disk_stats
45584 * as sync_io is counted when a request starts, and
45585 * disk_stats is counted when it completes.
45586diff --git a/drivers/md/md.h b/drivers/md/md.h
45587index 318ca8f..31e4478 100644
45588--- a/drivers/md/md.h
45589+++ b/drivers/md/md.h
45590@@ -94,13 +94,13 @@ struct md_rdev {
45591 * only maintained for arrays that
45592 * support hot removal
45593 */
45594- atomic_t read_errors; /* number of consecutive read errors that
45595+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45596 * we have tried to ignore.
45597 */
45598 struct timespec last_read_error; /* monotonic time since our
45599 * last read error
45600 */
45601- atomic_t corrected_errors; /* number of corrected read errors,
45602+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45603 * for reporting to userspace and storing
45604 * in superblock.
45605 */
45606@@ -476,7 +476,7 @@ extern void mddev_unlock(struct mddev *mddev);
45607
45608 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45609 {
45610- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45611+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45612 }
45613
45614 struct md_personality
45615diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45616index e8a9042..35bd145 100644
45617--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45618+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45619@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45620 * Flick into a mode where all blocks get allocated in the new area.
45621 */
45622 smm->begin = old_len;
45623- memcpy(sm, &bootstrap_ops, sizeof(*sm));
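45623+	/* the constify plugin makes ops-only structs read-only; cast away const for this deliberate swap */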
45624+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45625
45626 /*
45627 * Extend.
45628@@ -714,7 +714,7 @@ out:
45629 /*
45630 * Switch back to normal behaviour.
45631 */
45632- memcpy(sm, &ops, sizeof(*sm));
45633+ memcpy((void *)sm, &ops, sizeof(*sm));
45634 return r;
45635 }
45636
45637diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45638index 3e6d115..ffecdeb 100644
45639--- a/drivers/md/persistent-data/dm-space-map.h
45640+++ b/drivers/md/persistent-data/dm-space-map.h
45641@@ -71,6 +71,7 @@ struct dm_space_map {
45642 dm_sm_threshold_fn fn,
45643 void *context);
45644 };
45645+typedef struct dm_space_map __no_const dm_space_map_no_const;
45646
45647 /*----------------------------------------------------------------*/
45648
45649diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45650index d34e238..34f8d98 100644
45651--- a/drivers/md/raid1.c
45652+++ b/drivers/md/raid1.c
45653@@ -1922,7 +1922,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45654 if (r1_sync_page_io(rdev, sect, s,
45655 bio->bi_io_vec[idx].bv_page,
45656 READ) != 0)
45657- atomic_add(s, &rdev->corrected_errors);
45658+ atomic_add_unchecked(s, &rdev->corrected_errors);
45659 }
45660 sectors -= s;
45661 sect += s;
45662@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45663 !test_bit(Faulty, &rdev->flags)) {
45664 if (r1_sync_page_io(rdev, sect, s,
45665 conf->tmppage, READ)) {
45666- atomic_add(s, &rdev->corrected_errors);
45667+ atomic_add_unchecked(s, &rdev->corrected_errors);
45668 printk(KERN_INFO
45669 "md/raid1:%s: read error corrected "
45670 "(%d sectors at %llu on %s)\n",
45671diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45672index a7196c4..439f012 100644
45673--- a/drivers/md/raid10.c
45674+++ b/drivers/md/raid10.c
45675@@ -1934,7 +1934,7 @@ static void end_sync_read(struct bio *bio, int error)
45676 /* The write handler will notice the lack of
45677 * R10BIO_Uptodate and record any errors etc
45678 */
45679- atomic_add(r10_bio->sectors,
45680+ atomic_add_unchecked(r10_bio->sectors,
45681 &conf->mirrors[d].rdev->corrected_errors);
45682
45683 /* for reconstruct, we always reschedule after a read.
45684@@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45685 {
45686 struct timespec cur_time_mon;
45687 unsigned long hours_since_last;
45688- unsigned int read_errors = atomic_read(&rdev->read_errors);
45689+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45690
45691 ktime_get_ts(&cur_time_mon);
45692
45693@@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45694 * overflowing the shift of read_errors by hours_since_last.
45695 */
45696 if (hours_since_last >= 8 * sizeof(read_errors))
45697- atomic_set(&rdev->read_errors, 0);
45698+ atomic_set_unchecked(&rdev->read_errors, 0);
45699 else
45700- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45701+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45702 }
45703
45704 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45705@@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45706 return;
45707
45708 check_decay_read_errors(mddev, rdev);
45709- atomic_inc(&rdev->read_errors);
45710- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45711+ atomic_inc_unchecked(&rdev->read_errors);
45712+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45713 char b[BDEVNAME_SIZE];
45714 bdevname(rdev->bdev, b);
45715
45716@@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45717 "md/raid10:%s: %s: Raid device exceeded "
45718 "read_error threshold [cur %d:max %d]\n",
45719 mdname(mddev), b,
45720- atomic_read(&rdev->read_errors), max_read_errors);
45721+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
45722 printk(KERN_NOTICE
45723 "md/raid10:%s: %s: Failing raid device\n",
45724 mdname(mddev), b);
45725@@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45726 sect +
45727 choose_data_offset(r10_bio, rdev)),
45728 bdevname(rdev->bdev, b));
45729- atomic_add(s, &rdev->corrected_errors);
45730+ atomic_add_unchecked(s, &rdev->corrected_errors);
45731 }
45732
45733 rdev_dec_pending(rdev, mddev);
45734diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
45735index cd2f96b..3876e63 100644
45736--- a/drivers/md/raid5.c
45737+++ b/drivers/md/raid5.c
45738@@ -947,23 +947,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
45739 struct bio_vec bvl;
45740 struct bvec_iter iter;
45741 struct page *bio_page;
45742- int page_offset;
45743+ s64 page_offset;
45744 struct async_submit_ctl submit;
45745 enum async_tx_flags flags = 0;
45746
45747 if (bio->bi_iter.bi_sector >= sector)
45748- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
45749+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
45750 else
45751- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
45752+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
45753
45754 if (frombio)
45755 flags |= ASYNC_TX_FENCE;
45756 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
45757
45758 bio_for_each_segment(bvl, bio, iter) {
45759- int len = bvl.bv_len;
45760- int clen;
45761- int b_offset = 0;
45762+ s64 len = bvl.bv_len;
45763+ s64 clen;
45764+ s64 b_offset = 0;
45765
45766 if (page_offset < 0) {
45767 b_offset = -page_offset;
45768@@ -1727,6 +1727,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
45769 return 1;
45770 }
45771
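45771+/* under HIDESYM, name the slab cache with a sequence number instead of leaking a kernel pointer via %p */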
45772+#ifdef CONFIG_GRKERNSEC_HIDESYM
45773+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
45774+#endif
45775+
45776 static int grow_stripes(struct r5conf *conf, int num)
45777 {
45778 struct kmem_cache *sc;
45779@@ -1738,7 +1742,11 @@ static int grow_stripes(struct r5conf *conf, int num)
45780 "raid%d-%s", conf->level, mdname(conf->mddev));
45781 else
45782 sprintf(conf->cache_name[0],
45783+#ifdef CONFIG_GRKERNSEC_HIDESYM
45784+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
45785+#else
45786 "raid%d-%p", conf->level, conf->mddev);
45787+#endif
45788 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
45789
45790 conf->active_name = 0;
45791@@ -2014,21 +2022,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
45792 mdname(conf->mddev), STRIPE_SECTORS,
45793 (unsigned long long)s,
45794 bdevname(rdev->bdev, b));
45795- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
45796+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
45797 clear_bit(R5_ReadError, &sh->dev[i].flags);
45798 clear_bit(R5_ReWrite, &sh->dev[i].flags);
45799 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
45800 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
45801
45802- if (atomic_read(&rdev->read_errors))
45803- atomic_set(&rdev->read_errors, 0);
45804+ if (atomic_read_unchecked(&rdev->read_errors))
45805+ atomic_set_unchecked(&rdev->read_errors, 0);
45806 } else {
45807 const char *bdn = bdevname(rdev->bdev, b);
45808 int retry = 0;
45809 int set_bad = 0;
45810
45811 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
45812- atomic_inc(&rdev->read_errors);
45813+ atomic_inc_unchecked(&rdev->read_errors);
45814 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
45815 printk_ratelimited(
45816 KERN_WARNING
45817@@ -2056,7 +2064,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
45818 mdname(conf->mddev),
45819 (unsigned long long)s,
45820 bdn);
45821- } else if (atomic_read(&rdev->read_errors)
45822+ } else if (atomic_read_unchecked(&rdev->read_errors)
45823 > conf->max_nr_stripes)
45824 printk(KERN_WARNING
45825 "md/raid:%s: Too many read errors, failing device %s.\n",
45826diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
45827index 983db75..ef9248c 100644
45828--- a/drivers/media/dvb-core/dvbdev.c
45829+++ b/drivers/media/dvb-core/dvbdev.c
45830@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
45831 const struct dvb_device *template, void *priv, int type)
45832 {
45833 struct dvb_device *dvbdev;
45834- struct file_operations *dvbdevfops;
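45834+	/* dvb_register_device() builds this fops from a template at runtime, so it must stay writable (no_const) */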
45835+ file_operations_no_const *dvbdevfops;
45836 struct device *clsdev;
45837 int minor;
45838 int id;
45839diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
45840index 6ad22b6..6e90e2a 100644
45841--- a/drivers/media/dvb-frontends/af9033.h
45842+++ b/drivers/media/dvb-frontends/af9033.h
45843@@ -96,6 +96,6 @@ struct af9033_ops {
45844 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
45845 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
45846 int onoff);
45847-};
45848+} __no_const;
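45848+/* note: these ops are populated at attach time, so the struct opts out of constification */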
45849
45850 #endif /* AF9033_H */
45851diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
45852index 9b6c3bb..baeb5c7 100644
45853--- a/drivers/media/dvb-frontends/dib3000.h
45854+++ b/drivers/media/dvb-frontends/dib3000.h
45855@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
45856 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
45857 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
45858 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
45859-};
45860+} __no_const;
45861
45862 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
45863 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
45864diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
45865index 1fea0e9..321ce8f 100644
45866--- a/drivers/media/dvb-frontends/dib7000p.h
45867+++ b/drivers/media/dvb-frontends/dib7000p.h
45868@@ -64,7 +64,7 @@ struct dib7000p_ops {
45869 int (*get_adc_power)(struct dvb_frontend *fe);
45870 int (*slave_reset)(struct dvb_frontend *fe);
45871 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
45872-};
45873+} __no_const;
45874
45875 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
45876 void *dib7000p_attach(struct dib7000p_ops *ops);
45877diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
45878index 84cc103..5780c54 100644
45879--- a/drivers/media/dvb-frontends/dib8000.h
45880+++ b/drivers/media/dvb-frontends/dib8000.h
45881@@ -61,7 +61,7 @@ struct dib8000_ops {
45882 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
45883 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
45884 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
45885-};
45886+} __no_const;
45887
45888 #if IS_ENABLED(CONFIG_DVB_DIB8000)
45889 void *dib8000_attach(struct dib8000_ops *ops);
45890diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
45891index 860c98fc..497fa25 100644
45892--- a/drivers/media/pci/cx88/cx88-video.c
45893+++ b/drivers/media/pci/cx88/cx88-video.c
45894@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
45895
45896 /* ------------------------------------------------------------------ */
45897
45898-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45899-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45900-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
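45900+/* storage type now matches the int type declared in module_param_array() below */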
45901+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45902+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45903+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45904
45905 module_param_array(video_nr, int, NULL, 0444);
45906 module_param_array(vbi_nr, int, NULL, 0444);
45907diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
45908index 802642d..5534900 100644
45909--- a/drivers/media/pci/ivtv/ivtv-driver.c
45910+++ b/drivers/media/pci/ivtv/ivtv-driver.c
45911@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
45912 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
45913
45914 /* ivtv instance counter */
45915-static atomic_t ivtv_instance = ATOMIC_INIT(0);
45916+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
45917
45918 /* Parameter declarations */
45919 static int cardtype[IVTV_MAX_CARDS];
45920diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
45921index 570d119..ed25830 100644
45922--- a/drivers/media/pci/solo6x10/solo6x10-core.c
45923+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
45924@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
45925
45926 static int solo_sysfs_init(struct solo_dev *solo_dev)
45927 {
45928- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45929+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45930 struct device *dev = &solo_dev->dev;
45931 const char *driver;
45932 int i;
45933diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
45934index 7ddc767..1c24361 100644
45935--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
45936+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
45937@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
45938
45939 int solo_g723_init(struct solo_dev *solo_dev)
45940 {
45941- static struct snd_device_ops ops = { NULL };
45942+ static struct snd_device_ops ops = { };
45943 struct snd_card *card;
45944 struct snd_kcontrol_new kctl;
45945 char name[32];
45946diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45947index 8c84846..27b4f83 100644
45948--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
45949+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45950@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
45951
45952 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
45953 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
45954- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
45955+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
45956 if (p2m_id < 0)
45957 p2m_id = -p2m_id;
45958 }
45959diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
45960index 1ca54b0..7d7cb9a 100644
45961--- a/drivers/media/pci/solo6x10/solo6x10.h
45962+++ b/drivers/media/pci/solo6x10/solo6x10.h
45963@@ -218,7 +218,7 @@ struct solo_dev {
45964
45965 /* P2M DMA Engine */
45966 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
45967- atomic_t p2m_count;
45968+ atomic_unchecked_t p2m_count;
45969 int p2m_jiffies;
45970 unsigned int p2m_timeouts;
45971
45972diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
45973index c135165..dc69499 100644
45974--- a/drivers/media/pci/tw68/tw68-core.c
45975+++ b/drivers/media/pci/tw68/tw68-core.c
45976@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
45977 module_param_array(card, int, NULL, 0444);
45978 MODULE_PARM_DESC(card, "card type");
45979
45980-static atomic_t tw68_instance = ATOMIC_INIT(0);
45981+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
45982
45983 /* ------------------------------------------------------------------ */
45984
45985diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
45986index ba2d8f9..1566684 100644
45987--- a/drivers/media/platform/omap/omap_vout.c
45988+++ b/drivers/media/platform/omap/omap_vout.c
45989@@ -63,7 +63,6 @@ enum omap_vout_channels {
45990 OMAP_VIDEO2,
45991 };
45992
45993-static struct videobuf_queue_ops video_vbq_ops;
45994 /* Variables configurable through module params*/
45995 static u32 video1_numbuffers = 3;
45996 static u32 video2_numbuffers = 3;
45997@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
45998 {
45999 struct videobuf_queue *q;
46000 struct omap_vout_device *vout = NULL;
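46000+	/* moved into the function and initialized at definition so the ops need never be written at runtime */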
46001+ static struct videobuf_queue_ops video_vbq_ops = {
46002+ .buf_setup = omap_vout_buffer_setup,
46003+ .buf_prepare = omap_vout_buffer_prepare,
46004+ .buf_release = omap_vout_buffer_release,
46005+ .buf_queue = omap_vout_buffer_queue,
46006+ };
46007
46008 vout = video_drvdata(file);
46009 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46010@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
46011 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46012
46013 q = &vout->vbq;
46014- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46015- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46016- video_vbq_ops.buf_release = omap_vout_buffer_release;
46017- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46018 spin_lock_init(&vout->vbq_lock);
46019
46020 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
46021diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46022index fb2acc5..a2fcbdc4 100644
46023--- a/drivers/media/platform/s5p-tv/mixer.h
46024+++ b/drivers/media/platform/s5p-tv/mixer.h
46025@@ -156,7 +156,7 @@ struct mxr_layer {
46026 /** layer index (unique identifier) */
46027 int idx;
46028 /** callbacks for layer methods */
46029- struct mxr_layer_ops ops;
46030+ struct mxr_layer_ops *ops;
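46030+	/* a pointer lets all layers share one static ops table instead of carrying a writable copy */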
46031 /** format array */
46032 const struct mxr_format **fmt_array;
46033 /** size of format array */
46034diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46035index 74344c7..a39e70e 100644
46036--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46037+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46038@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46039 {
46040 struct mxr_layer *layer;
46041 int ret;
46042- struct mxr_layer_ops ops = {
46043+ static struct mxr_layer_ops ops = {
46044 .release = mxr_graph_layer_release,
46045 .buffer_set = mxr_graph_buffer_set,
46046 .stream_set = mxr_graph_stream_set,
46047diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46048index b713403..53cb5ad 100644
46049--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46050+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46051@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46052 layer->update_buf = next;
46053 }
46054
46055- layer->ops.buffer_set(layer, layer->update_buf);
46056+ layer->ops->buffer_set(layer, layer->update_buf);
46057
46058 if (done && done != layer->shadow_buf)
46059 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46060diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46061index 72d4f2e..4b2ea0d 100644
46062--- a/drivers/media/platform/s5p-tv/mixer_video.c
46063+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46064@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46065 layer->geo.src.height = layer->geo.src.full_height;
46066
46067 mxr_geometry_dump(mdev, &layer->geo);
46068- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46069+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46070 mxr_geometry_dump(mdev, &layer->geo);
46071 }
46072
46073@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46074 layer->geo.dst.full_width = mbus_fmt.width;
46075 layer->geo.dst.full_height = mbus_fmt.height;
46076 layer->geo.dst.field = mbus_fmt.field;
46077- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46078+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46079
46080 mxr_geometry_dump(mdev, &layer->geo);
46081 }
46082@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46083 /* set source size to highest accepted value */
46084 geo->src.full_width = max(geo->dst.full_width, pix->width);
46085 geo->src.full_height = max(geo->dst.full_height, pix->height);
46086- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46087+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46088 mxr_geometry_dump(mdev, &layer->geo);
46089 /* set cropping to total visible screen */
46090 geo->src.width = pix->width;
46091@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46092 geo->src.x_offset = 0;
46093 geo->src.y_offset = 0;
46094 /* assure consistency of geometry */
46095- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46096+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46097 mxr_geometry_dump(mdev, &layer->geo);
46098 /* set full size to lowest possible value */
46099 geo->src.full_width = 0;
46100 geo->src.full_height = 0;
46101- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46102+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46103 mxr_geometry_dump(mdev, &layer->geo);
46104
46105 /* returning results */
46106@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46107 target->width = s->r.width;
46108 target->height = s->r.height;
46109
46110- layer->ops.fix_geometry(layer, stage, s->flags);
46111+ layer->ops->fix_geometry(layer, stage, s->flags);
46112
46113 /* retrieve update selection rectangle */
46114 res.left = target->x_offset;
46115@@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46116 mxr_output_get(mdev);
46117
46118 mxr_layer_update_output(layer);
46119- layer->ops.format_set(layer);
46120+ layer->ops->format_set(layer);
46121 /* enabling layer in hardware */
46122 spin_lock_irqsave(&layer->enq_slock, flags);
46123 layer->state = MXR_LAYER_STREAMING;
46124 spin_unlock_irqrestore(&layer->enq_slock, flags);
46125
46126- layer->ops.stream_set(layer, MXR_ENABLE);
46127+ layer->ops->stream_set(layer, MXR_ENABLE);
46128 mxr_streamer_get(mdev);
46129
46130 return 0;
46131@@ -1014,7 +1014,7 @@ static void stop_streaming(struct vb2_queue *vq)
46132 spin_unlock_irqrestore(&layer->enq_slock, flags);
46133
46134 /* disabling layer in hardware */
46135- layer->ops.stream_set(layer, MXR_DISABLE);
46136+ layer->ops->stream_set(layer, MXR_DISABLE);
46137 /* remove one streamer */
46138 mxr_streamer_put(mdev);
46139 /* allow changes in output configuration */
46140@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46141
46142 void mxr_layer_release(struct mxr_layer *layer)
46143 {
46144- if (layer->ops.release)
46145- layer->ops.release(layer);
46146+ if (layer->ops->release)
46147+ layer->ops->release(layer);
46148 }
46149
46150 void mxr_base_layer_release(struct mxr_layer *layer)
46151@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46152
46153 layer->mdev = mdev;
46154 layer->idx = idx;
46155- layer->ops = *ops;
46156+ layer->ops = ops;
46157
46158 spin_lock_init(&layer->enq_slock);
46159 INIT_LIST_HEAD(&layer->enq_list);
46160diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46161index c9388c4..ce71ece 100644
46162--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46163+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46164@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46165 {
46166 struct mxr_layer *layer;
46167 int ret;
46168- struct mxr_layer_ops ops = {
46169+ static struct mxr_layer_ops ops = {
46170 .release = mxr_vp_layer_release,
46171 .buffer_set = mxr_vp_buffer_set,
46172 .stream_set = mxr_vp_stream_set,
46173diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46174index 82affae..42833ec 100644
46175--- a/drivers/media/radio/radio-cadet.c
46176+++ b/drivers/media/radio/radio-cadet.c
46177@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46178 unsigned char readbuf[RDS_BUFFER];
46179 int i = 0;
46180
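46180+	/* bound the request against the fixed-size RDS buffer before any copying */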
46181+ if (count > RDS_BUFFER)
46182+		return -EINVAL;
46183 mutex_lock(&dev->lock);
46184 if (dev->rdsstat == 0)
46185 cadet_start_rds(dev);
46186@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46187 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46188 mutex_unlock(&dev->lock);
46189
46190- if (i && copy_to_user(data, readbuf, i))
46191- return -EFAULT;
46192+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46193+ i = -EFAULT;
46194+
46195 return i;
46196 }
46197
46198diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46199index 5236035..c622c74 100644
46200--- a/drivers/media/radio/radio-maxiradio.c
46201+++ b/drivers/media/radio/radio-maxiradio.c
46202@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46203 /* TEA5757 pin mappings */
46204 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46205
46206-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46207+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46208
46209 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46210 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46211diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46212index 050b3bb..79f62b9 100644
46213--- a/drivers/media/radio/radio-shark.c
46214+++ b/drivers/media/radio/radio-shark.c
46215@@ -79,7 +79,7 @@ struct shark_device {
46216 u32 last_val;
46217 };
46218
46219-static atomic_t shark_instance = ATOMIC_INIT(0);
46220+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46221
46222 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46223 {
46224diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46225index 8654e0d..0608a64 100644
46226--- a/drivers/media/radio/radio-shark2.c
46227+++ b/drivers/media/radio/radio-shark2.c
46228@@ -74,7 +74,7 @@ struct shark_device {
46229 u8 *transfer_buffer;
46230 };
46231
46232-static atomic_t shark_instance = ATOMIC_INIT(0);
46233+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46234
46235 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46236 {
46237diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46238index dccf586..d5db411 100644
46239--- a/drivers/media/radio/radio-si476x.c
46240+++ b/drivers/media/radio/radio-si476x.c
46241@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46242 struct si476x_radio *radio;
46243 struct v4l2_ctrl *ctrl;
46244
46245- static atomic_t instance = ATOMIC_INIT(0);
46246+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46247
46248 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46249 if (!radio)
46250diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
46251index 704397f..4d05977 100644
46252--- a/drivers/media/radio/wl128x/fmdrv_common.c
46253+++ b/drivers/media/radio/wl128x/fmdrv_common.c
46254@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
46255 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
46256
46257 /* Radio Nr */
46258-static u32 radio_nr = -1;
46259+static int radio_nr = -1;
46260 module_param(radio_nr, int, 0444);
46261 MODULE_PARM_DESC(radio_nr, "Radio Nr");
46262
46263diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46264index 9fd1527..8927230 100644
46265--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46266+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46267@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46268
46269 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46270 {
46271- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46272- char result[64];
46273- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46274- sizeof(result), 0);
46275+ char *buf;
46276+ char *result;
46277+ int retval;
46278+
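46278+	/* note: these buffers feed dvb_usb_generic_rw() (USB I/O), so they are moved off the stack */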
46279+ buf = kmalloc(2, GFP_KERNEL);
46280+ if (buf == NULL)
46281+ return -ENOMEM;
46282+ result = kmalloc(64, GFP_KERNEL);
46283+ if (result == NULL) {
46284+ kfree(buf);
46285+ return -ENOMEM;
46286+ }
46287+
46288+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46289+ buf[1] = enable ? 1 : 0;
46290+
46291+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46292+
46293+ kfree(buf);
46294+ kfree(result);
46295+ return retval;
46296 }
46297
46298 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46299 {
46300- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46301- char state[3];
46302- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46303+ char *buf;
46304+ char *state;
46305+ int retval;
46306+
46307+ buf = kmalloc(2, GFP_KERNEL);
46308+ if (buf == NULL)
46309+ return -ENOMEM;
46310+ state = kmalloc(3, GFP_KERNEL);
46311+ if (state == NULL) {
46312+ kfree(buf);
46313+ return -ENOMEM;
46314+ }
46315+
46316+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46317+	buf[1] = enable ? 0 : 1; /* keep the sense of the removed on-stack initializer */
46318+
46319+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46320+
46321+ kfree(buf);
46322+ kfree(state);
46323+ return retval;
46324 }
46325
46326 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46327 {
46328- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46329- char state[3];
46330+ char *query;
46331+ char *state;
46332 int ret;
46333+ query = kmalloc(1, GFP_KERNEL);
46334+ if (query == NULL)
46335+ return -ENOMEM;
46336+ state = kmalloc(3, GFP_KERNEL);
46337+ if (state == NULL) {
46338+ kfree(query);
46339+ return -ENOMEM;
46340+ }
46341+
46342+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46343
46344 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46345
46346- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46347- sizeof(state), 0);
46348+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46349 if (ret < 0) {
46350 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46351 "state info\n");
46352@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46353
46354 /* Copy this pointer as we are gonna need it in the release phase */
46355 cinergyt2_usb_device = adap->dev;
46356-
46357+ kfree(query);
46358+ kfree(state);
46359 return 0;
46360 }
46361
46362@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46363 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46364 {
46365 struct cinergyt2_state *st = d->priv;
46366- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46367+ u8 *key, *cmd;
46368 int i;
46369
46370+ cmd = kmalloc(1, GFP_KERNEL);
46371+ if (cmd == NULL)
46372+		return -ENOMEM;
46373+ key = kzalloc(5, GFP_KERNEL);
46374+ if (key == NULL) {
46375+ kfree(cmd);
46376+		return -ENOMEM;
46377+ }
46378+
46379+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46380+
46381 *state = REMOTE_NO_KEY_PRESSED;
46382
46383- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46384+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46385 if (key[4] == 0xff) {
46386 /* key repeat */
46387 st->rc_counter++;
46388@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46389 *event = d->last_event;
46390 deb_rc("repeat key, event %x\n",
46391 *event);
46392- return 0;
46393+ goto out;
46394 }
46395 }
46396 deb_rc("repeated key (non repeatable)\n");
46397 }
46398- return 0;
46399+ goto out;
46400 }
46401
46402 /* hack to pass checksum on the custom field */
46403@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46404
46405 deb_rc("key: %*ph\n", 5, key);
46406 }
46407+out:
46408+ kfree(cmd);
46409+ kfree(key);
46410 return 0;
46411 }
46412
46413diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46414index c890fe4..f9b2ae6 100644
46415--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46416+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46417@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46418 fe_status_t *status)
46419 {
46420 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46421- struct dvbt_get_status_msg result;
46422- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46423+ struct dvbt_get_status_msg *result;
46424+ u8 *cmd;
46425 int ret;
46426
46427- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46428- sizeof(result), 0);
46429+ cmd = kmalloc(1, GFP_KERNEL);
46430+ if (cmd == NULL)
46431+ return -ENOMEM;
46432+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46433+ if (result == NULL) {
46434+ kfree(cmd);
46435+ return -ENOMEM;
46436+ }
46437+
46438+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46439+
46440+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46441+ sizeof(*result), 0);
46442 if (ret < 0)
46443- return ret;
46444+ goto out;
46445
46446 *status = 0;
46447
46448- if (0xffff - le16_to_cpu(result.gain) > 30)
46449+ if (0xffff - le16_to_cpu(result->gain) > 30)
46450 *status |= FE_HAS_SIGNAL;
46451- if (result.lock_bits & (1 << 6))
46452+ if (result->lock_bits & (1 << 6))
46453 *status |= FE_HAS_LOCK;
46454- if (result.lock_bits & (1 << 5))
46455+ if (result->lock_bits & (1 << 5))
46456 *status |= FE_HAS_SYNC;
46457- if (result.lock_bits & (1 << 4))
46458+ if (result->lock_bits & (1 << 4))
46459 *status |= FE_HAS_CARRIER;
46460- if (result.lock_bits & (1 << 1))
46461+ if (result->lock_bits & (1 << 1))
46462 *status |= FE_HAS_VITERBI;
46463
46464 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46465 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46466 *status &= ~FE_HAS_LOCK;
46467
46468- return 0;
46469+out:
46470+ kfree(cmd);
46471+ kfree(result);
46472+ return ret;
46473 }
46474
46475 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46476 {
46477 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46478- struct dvbt_get_status_msg status;
46479- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46480+ struct dvbt_get_status_msg *status;
46481+ char *cmd;
46482 int ret;
46483
46484- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46485- sizeof(status), 0);
46486+ cmd = kmalloc(1, GFP_KERNEL);
46487+ if (cmd == NULL)
46488+ return -ENOMEM;
46489+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46490+ if (status == NULL) {
46491+ kfree(cmd);
46492+ return -ENOMEM;
46493+ }
46494+
46495+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46496+
46497+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46498+ sizeof(*status), 0);
46499 if (ret < 0)
46500- return ret;
46501+ goto out;
46502
46503- *ber = le32_to_cpu(status.viterbi_error_rate);
46504+ *ber = le32_to_cpu(status->viterbi_error_rate);
46505+out:
46506+ kfree(cmd);
46507+ kfree(status);
46508 return 0;
46509 }
46510
46511 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46512 {
46513 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46514- struct dvbt_get_status_msg status;
46515- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46516+ struct dvbt_get_status_msg *status;
46517+ u8 *cmd;
46518 int ret;
46519
46520- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46521- sizeof(status), 0);
46522+ cmd = kmalloc(1, GFP_KERNEL);
46523+ if (cmd == NULL)
46524+ return -ENOMEM;
46525+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46526+ if (status == NULL) {
46527+ kfree(cmd);
46528+ return -ENOMEM;
46529+ }
46530+
46531+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46532+
46533+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46534+ sizeof(*status), 0);
46535 if (ret < 0) {
46536 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46537 ret);
46538- return ret;
46539+ goto out;
46540 }
46541- *unc = le32_to_cpu(status.uncorrected_block_count);
46542- return 0;
46543+ *unc = le32_to_cpu(status->uncorrected_block_count);
46544+
46545+out:
46546+ kfree(cmd);
46547+ kfree(status);
46548+ return ret;
46549 }
46550
46551 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46552 u16 *strength)
46553 {
46554 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46555- struct dvbt_get_status_msg status;
46556- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46557+ struct dvbt_get_status_msg *status;
46558+ char *cmd;
46559 int ret;
46560
46561- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46562- sizeof(status), 0);
46563+ cmd = kmalloc(1, GFP_KERNEL);
46564+ if (cmd == NULL)
46565+ return -ENOMEM;
46566+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46567+ if (status == NULL) {
46568+ kfree(cmd);
46569+ return -ENOMEM;
46570+ }
46571+
46572+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46573+
46574+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46575+ sizeof(*status), 0);
46576 if (ret < 0) {
46577 err("cinergyt2_fe_read_signal_strength() Failed!"
46578 " (Error=%d)\n", ret);
46579- return ret;
46580+ goto out;
46581 }
46582- *strength = (0xffff - le16_to_cpu(status.gain));
46583+ *strength = (0xffff - le16_to_cpu(status->gain));
46584+
46585+out:
46586+ kfree(cmd);
46587+ kfree(status);
46588 return 0;
46589 }
46590
46591 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46592 {
46593 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46594- struct dvbt_get_status_msg status;
46595- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46596+ struct dvbt_get_status_msg *status;
46597+ char *cmd;
46598 int ret;
46599
46600- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46601- sizeof(status), 0);
46602+ cmd = kmalloc(1, GFP_KERNEL);
46603+ if (cmd == NULL)
46604+ return -ENOMEM;
46605+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46606+ if (status == NULL) {
46607+ kfree(cmd);
46608+ return -ENOMEM;
46609+ }
46610+
46611+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46612+
46613+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46614+ sizeof(*status), 0);
46615 if (ret < 0) {
46616 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46617- return ret;
46618+ goto out;
46619 }
46620- *snr = (status.snr << 8) | status.snr;
46621- return 0;
46622+ *snr = (status->snr << 8) | status->snr;
46623+
46624+out:
46625+ kfree(cmd);
46626+ kfree(status);
46627+ return ret;
46628 }
46629
46630 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46631@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46632 {
46633 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46634 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46635- struct dvbt_set_parameters_msg param;
46636- char result[2];
46637+ struct dvbt_set_parameters_msg *param;
46638+ char *result;
46639 int err;
46640
46641- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46642- param.tps = cpu_to_le16(compute_tps(fep));
46643- param.freq = cpu_to_le32(fep->frequency / 1000);
46644- param.flags = 0;
46645+ result = kmalloc(2, GFP_KERNEL);
46646+ if (result == NULL)
46647+ return -ENOMEM;
46648+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46649+ if (param == NULL) {
46650+ kfree(result);
46651+ return -ENOMEM;
46652+ }
46653+
46654+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46655+ param->tps = cpu_to_le16(compute_tps(fep));
46656+ param->freq = cpu_to_le32(fep->frequency / 1000);
46657+ param->flags = 0;
46658
46659 switch (fep->bandwidth_hz) {
46660 default:
46661 case 8000000:
46662- param.bandwidth = 8;
46663+ param->bandwidth = 8;
46664 break;
46665 case 7000000:
46666- param.bandwidth = 7;
46667+ param->bandwidth = 7;
46668 break;
46669 case 6000000:
46670- param.bandwidth = 6;
46671+ param->bandwidth = 6;
46672 break;
46673 }
46674
46675 err = dvb_usb_generic_rw(state->d,
46676- (char *)&param, sizeof(param),
46677- result, sizeof(result), 0);
46678+ (char *)param, sizeof(*param),
46679+ result, 2, 0);
46680 if (err < 0)
46681 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46682
46683- return (err < 0) ? err : 0;
46684+ kfree(result);
46685+ kfree(param);
46686+ return err;
46687 }
46688
46689 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46690diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46691index 733a7ff..f8b52e3 100644
46692--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46693+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46694@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46695
46696 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46697 {
46698- struct hexline hx;
46699- u8 reset;
46700+ struct hexline *hx;
46701+ u8 *reset;
46702 int ret,pos=0;
46703
46704+ reset = kmalloc(1, GFP_KERNEL);
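46704+	/* note: the hexline and reset byte are handed to USB write helpers, so keep them on the heap */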
46705+ if (reset == NULL)
46706+ return -ENOMEM;
46707+
46708+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46709+ if (hx == NULL) {
46710+ kfree(reset);
46711+ return -ENOMEM;
46712+ }
46713+
46714 /* stop the CPU */
46715- reset = 1;
46716- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46717+ reset[0] = 1;
46718+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46719 err("could not stop the USB controller CPU.");
46720
46721- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46722- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46723- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46724+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46725+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46726+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46727
46728- if (ret != hx.len) {
46729+ if (ret != hx->len) {
46730 err("error while transferring firmware "
46731 "(transferred size: %d, block size: %d)",
46732- ret,hx.len);
46733+ ret,hx->len);
46734 ret = -EINVAL;
46735 break;
46736 }
46737 }
46738 if (ret < 0) {
46739 err("firmware download failed at %d with %d",pos,ret);
46740+ kfree(reset);
46741+ kfree(hx);
46742 return ret;
46743 }
46744
46745 if (ret == 0) {
46746 /* restart the CPU */
46747- reset = 0;
46748- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46749+ reset[0] = 0;
46750+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46751 err("could not restart the USB controller CPU.");
46752 ret = -EINVAL;
46753 }
46754 } else
46755 ret = -EIO;
46756
46757+ kfree(reset);
46758+ kfree(hx);
46759+
46760 return ret;
46761 }
46762 EXPORT_SYMBOL(usb_cypress_load_firmware);
46763diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
46764index 1a3df10..57997a5 100644
46765--- a/drivers/media/usb/dvb-usb/dw2102.c
46766+++ b/drivers/media/usb/dvb-usb/dw2102.c
46767@@ -118,7 +118,7 @@ struct su3000_state {
46768
46769 struct s6x0_state {
46770 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
46771-};
46772+} __no_const;
46773
46774 /* debug */
46775 static int dvb_usb_dw2102_debug;
46776diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
46777index 5801ae7..83f71fa 100644
46778--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
46779+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
46780@@ -87,8 +87,11 @@ struct technisat_usb2_state {
46781 static int technisat_usb2_i2c_access(struct usb_device *udev,
46782 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
46783 {
46784- u8 b[64];
46785- int ret, actual_length;
46786+ u8 *b = kmalloc(64, GFP_KERNEL);
46787+ int ret, actual_length, error = 0;
46788+
46789+ if (b == NULL)
46790+ return -ENOMEM;
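46790+	/* b backs the usb_bulk_msg() transfers below, hence the heap allocation */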
46791
46792 deb_i2c("i2c-access: %02x, tx: ", device_addr);
46793 debug_dump(tx, txlen, deb_i2c);
46794@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46795
46796 if (ret < 0) {
46797 err("i2c-error: out failed %02x = %d", device_addr, ret);
46798- return -ENODEV;
46799+ error = -ENODEV;
46800+ goto out;
46801 }
46802
46803 ret = usb_bulk_msg(udev,
46804@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46805 b, 64, &actual_length, 1000);
46806 if (ret < 0) {
46807 err("i2c-error: in failed %02x = %d", device_addr, ret);
46808- return -ENODEV;
46809+ error = -ENODEV;
46810+ goto out;
46811 }
46812
46813 if (b[0] != I2C_STATUS_OK) {
46814@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46815 /* handle tuner-i2c-nak */
46816 if (!(b[0] == I2C_STATUS_NAK &&
46817 device_addr == 0x60
46818- /* && device_is_technisat_usb2 */))
46819- return -ENODEV;
46820+ /* && device_is_technisat_usb2 */)) {
46821+ error = -ENODEV;
46822+ goto out;
46823+ }
46824 }
46825
46826 deb_i2c("status: %d, ", b[0]);
46827@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46828
46829 deb_i2c("\n");
46830
46831- return 0;
46832+out:
46833+ kfree(b);
46834+ return error;
46835 }
46836
46837 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
46838@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46839 {
46840 int ret;
46841
46842- u8 led[8] = {
46843- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46844- 0
46845- };
46846+ u8 *led = kzalloc(8, GFP_KERNEL);
46847+
46848+ if (led == NULL)
46849+ return -ENOMEM;
46850
46851-	if (disable_led_control && state != TECH_LED_OFF)
46852-		return 0;
46851+	if (disable_led_control && state != TECH_LED_OFF) {
46851+		kfree(led);
46851+		return 0;
46851+	}
46853
46854+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
46855+
46856 switch (state) {
46857 case TECH_LED_ON:
46858 led[1] = 0x82;
46859@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46860 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46861 USB_TYPE_VENDOR | USB_DIR_OUT,
46862 0, 0,
46863- led, sizeof(led), 500);
46864+ led, 8, 500);
46865
46866 mutex_unlock(&d->i2c_mutex);
46867+
46868+ kfree(led);
46869+
46870 return ret;
46871 }
46872
46873 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
46874 {
46875 int ret;
46876- u8 b = 0;
46877+ u8 *b = kzalloc(1, GFP_KERNEL);
46878+
46879+ if (b == NULL)
46880+ return -ENOMEM;
46881
46882-	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
46883-		return -EAGAIN;
46882+	if (mutex_lock_interruptible(&d->i2c_mutex) < 0) {
46882+		kfree(b);
46882+		return -EAGAIN;
46882+	}
46884@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
46885 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
46886 USB_TYPE_VENDOR | USB_DIR_OUT,
46887 (red << 8) | green, 0,
46888- &b, 1, 500);
46889+ b, 1, 500);
46890
46891 mutex_unlock(&d->i2c_mutex);
46892
46893+ kfree(b);
46894+
46895 return ret;
46896 }
46897
46898@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46899 struct dvb_usb_device_description **desc, int *cold)
46900 {
46901 int ret;
46902- u8 version[3];
46903+	u8 *version;
46904
46905 /* first select the interface */
46906 if (usb_set_interface(udev, 0, 1) != 0)
46907@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46908
46909 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
46910
46911+	/* allocate after usb_set_interface() so its failure path cannot leak the buffer */
46911+	version = kmalloc(3, GFP_KERNEL);
46911+	if (version == NULL)
46912+ return 0;
46913+
46914 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
46915 GET_VERSION_INFO_VENDOR_REQUEST,
46916 USB_TYPE_VENDOR | USB_DIR_IN,
46917 0, 0,
46918- version, sizeof(version), 500);
46919+ version, 3, 500);
46920
46921 if (ret < 0)
46922 *cold = 1;
46923@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46924 *cold = 0;
46925 }
46926
46927+ kfree(version);
46928+
46929 return 0;
46930 }
46931
46932@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
46933
46934 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46935 {
46936- u8 buf[62], *b;
46937+ u8 *buf, *b;
46938 int ret;
46939 struct ir_raw_event ev;
46940
46941+ buf = kmalloc(62, GFP_KERNEL);
46942+
46943+ if (buf == NULL)
46944+ return -ENOMEM;
46945+
46946 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
46947 buf[1] = 0x08;
46948 buf[2] = 0x8f;
46949@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46950 GET_IR_DATA_VENDOR_REQUEST,
46951 USB_TYPE_VENDOR | USB_DIR_IN,
46952 0x8080, 0,
46953- buf, sizeof(buf), 500);
46954+ buf, 62, 500);
46955
46956 unlock:
46957 mutex_unlock(&d->i2c_mutex);
46958
46959- if (ret < 0)
46960+ if (ret < 0) {
46961+ kfree(buf);
46962 return ret;
46963+ }
46964
46965- if (ret == 1)
46966+ if (ret == 1) {
46967+ kfree(buf);
46968 return 0; /* no key pressed */
46969+ }
46970
46971 /* decoding */
46972 b = buf+1;
46973@@ -656,6 +689,8 @@ unlock:
46974
46975 ir_raw_event_handle(d->rc_dev);
46976
46977+ kfree(buf);
46978+
46979 return 1;
46980 }
46981
46982diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46983index af63543..0436f20 100644
46984--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46985+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46986@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46987 * by passing a very big num_planes value */
46988 uplane = compat_alloc_user_space(num_planes *
46989 sizeof(struct v4l2_plane));
46990- kp->m.planes = (__force struct v4l2_plane *)uplane;
46991+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
46992
46993 while (--num_planes >= 0) {
46994 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
46995@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46996 if (num_planes == 0)
46997 return 0;
46998
46999- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
47000+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47001 if (get_user(p, &up->m.planes))
47002 return -EFAULT;
47003 uplane32 = compat_ptr(p);
47004@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47005 get_user(kp->flags, &up->flags) ||
47006 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
47007 return -EFAULT;
47008- kp->base = (__force void *)compat_ptr(tmp);
47009+ kp->base = (__force_kernel void *)compat_ptr(tmp);
47010 return 0;
47011 }
47012
47013@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47014 n * sizeof(struct v4l2_ext_control32)))
47015 return -EFAULT;
47016 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47017- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
47018+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
47019 while (--n >= 0) {
47020 u32 id;
47021
47022@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47023 {
47024 struct v4l2_ext_control32 __user *ucontrols;
47025 struct v4l2_ext_control __user *kcontrols =
47026- (__force struct v4l2_ext_control __user *)kp->controls;
47027+ (struct v4l2_ext_control __force_user *)kp->controls;
47028 int n = kp->count;
47029 compat_caddr_t p;
47030
47031@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47032 get_user(tmp, &up->edid) ||
47033 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47034 return -EFAULT;
47035- kp->edid = (__force u8 *)compat_ptr(tmp);
47036+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
47037 return 0;
47038 }
47039
47040diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47041index 015f92a..59e311e 100644
47042--- a/drivers/media/v4l2-core/v4l2-device.c
47043+++ b/drivers/media/v4l2-core/v4l2-device.c
47044@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47045 EXPORT_SYMBOL_GPL(v4l2_device_put);
47046
47047 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47048- atomic_t *instance)
47049+ atomic_unchecked_t *instance)
47050 {
47051- int num = atomic_inc_return(instance) - 1;
47052+ int num = atomic_inc_return_unchecked(instance) - 1;
47053 int len = strlen(basename);
47054
47055 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47056diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47057index b084072..36706d7 100644
47058--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47059+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47060@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
47061 struct file *file, void *fh, void *p);
47062 } u;
47063 void (*debug)(const void *arg, bool write_only);
47064-};
47065+} __do_const;
47066+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47067
47068 /* This control needs a priority check */
47069 #define INFO_FL_PRIO (1 << 0)
47070@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
47071 struct video_device *vfd = video_devdata(file);
47072 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47073 bool write_only = false;
47074- struct v4l2_ioctl_info default_info;
47075+ v4l2_ioctl_info_no_const default_info;
47076 const struct v4l2_ioctl_info *info;
47077 void *fh = file->private_data;
47078 struct v4l2_fh *vfh = NULL;
47079@@ -2426,7 +2427,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47080 ret = -EINVAL;
47081 break;
47082 }
47083- *user_ptr = (void __user *)buf->m.planes;
47084+ *user_ptr = (void __force_user *)buf->m.planes;
47085 *kernel_ptr = (void **)&buf->m.planes;
47086 *array_size = sizeof(struct v4l2_plane) * buf->length;
47087 ret = 1;
47088@@ -2443,7 +2444,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47089 ret = -EINVAL;
47090 break;
47091 }
47092- *user_ptr = (void __user *)edid->edid;
47093+ *user_ptr = (void __force_user *)edid->edid;
47094 *kernel_ptr = (void **)&edid->edid;
47095 *array_size = edid->blocks * 128;
47096 ret = 1;
47097@@ -2461,7 +2462,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47098 ret = -EINVAL;
47099 break;
47100 }
47101- *user_ptr = (void __user *)ctrls->controls;
47102+ *user_ptr = (void __force_user *)ctrls->controls;
47103 *kernel_ptr = (void **)&ctrls->controls;
47104 *array_size = sizeof(struct v4l2_ext_control)
47105 * ctrls->count;
47106@@ -2562,7 +2563,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47107 }
47108
47109 if (has_array_args) {
47110- *kernel_ptr = (void __force *)user_ptr;
47111+ *kernel_ptr = (void __force_kernel *)user_ptr;
47112 if (copy_to_user(user_ptr, mbuf, array_size))
47113 err = -EFAULT;
47114 goto out_array_args;
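
`__do_const` and `__no_const` are markers for the PaX constify GCC plugin: structures that consist of function pointers are forced read-only everywhere, and the rare site that genuinely needs a writable instance, like the on-stack `default_info` above, opts back out through a typedef. A plain-C approximation with the plugin attributes left as comments:

struct ioctl_info {
	long (*func)(void);
} /* __do_const: the plugin would make every instance const */;

/* __no_const: writable escape hatch for local copies */
typedef struct ioctl_info ioctl_info_no_const;

static long default_func(void) { return 0; }

static const struct ioctl_info table_entry = { .func = default_func };

static long dispatch(void)
{
	ioctl_info_no_const info = table_entry;	/* patchable stack copy */

	return info.func();
}
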
47115diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
47116index 24696f5..3637780 100644
47117--- a/drivers/memory/omap-gpmc.c
47118+++ b/drivers/memory/omap-gpmc.c
47119@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
47120 };
47121
47122 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
47123-static struct irq_chip gpmc_irq_chip;
47124 static int gpmc_irq_start;
47125
47126 static struct resource gpmc_mem_root;
47127@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
47128
47129 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
47130
47131+static struct irq_chip gpmc_irq_chip = {
47132+ .name = "gpmc",
47133+ .irq_startup = gpmc_irq_noop_ret,
47134+ .irq_enable = gpmc_irq_enable,
47135+ .irq_disable = gpmc_irq_disable,
47136+ .irq_shutdown = gpmc_irq_noop,
47137+ .irq_ack = gpmc_irq_noop,
47138+ .irq_mask = gpmc_irq_noop,
47139+ .irq_unmask = gpmc_irq_noop,
47140+};
47141+
47142 static int gpmc_setup_irq(void)
47143 {
47144 int i;
47145@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
47146 return gpmc_irq_start;
47147 }
47148
47149- gpmc_irq_chip.name = "gpmc";
47150- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
47151- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
47152- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
47153- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
47154- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
47155- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
47156- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
47157-
47158 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
47159 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
47160
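
Replacing the run-time field assignments in `gpmc_setup_irq()` with a designated initializer leaves nothing to write at boot, which is what allows the constify machinery to keep the `irq_chip` in read-only data. The same shape with a hypothetical chip:

#include <linux/irq.h>

static void demo_irq_noop(struct irq_data *d) { }
static unsigned int demo_irq_noop_ret(struct irq_data *d) { return 0; }

static struct irq_chip demo_irq_chip = {
	.name		= "demo",
	.irq_startup	= demo_irq_noop_ret,
	.irq_ack	= demo_irq_noop,
	.irq_mask	= demo_irq_noop,
	.irq_unmask	= demo_irq_noop,
};
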
47161diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47162index 187f836..679544b 100644
47163--- a/drivers/message/fusion/mptbase.c
47164+++ b/drivers/message/fusion/mptbase.c
47165@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47166 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47167 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47168
47169+#ifdef CONFIG_GRKERNSEC_HIDESYM
47170+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47171+#else
47172 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47173 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47174+#endif
47175+
47176 /*
47177 * Rounding UP to nearest 4-kB boundary here...
47178 */
47179@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47180 ioc->facts.GlobalCredits);
47181
47182 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47183+#ifdef CONFIG_GRKERNSEC_HIDESYM
47184+ NULL, NULL);
47185+#else
47186 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47187+#endif
47188 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47189 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47190 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
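
CONFIG_GRKERNSEC_HIDESYM forbids leaking kernel addresses to user space, so the `/proc` dump above substitutes NULL for the real frame and DMA pointers while keeping the output format stable. The pattern, sketched with a hypothetical helper:

#include <linux/seq_file.h>

static void show_frames(struct seq_file *m, const void *frames)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	seq_printf(m, "RequestFrames @ 0x%p\n", NULL);	/* hide layout */
#else
	seq_printf(m, "RequestFrames @ 0x%p\n", frames);
#endif
}
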
47191diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47192index 5bdaae1..eced16f 100644
47193--- a/drivers/message/fusion/mptsas.c
47194+++ b/drivers/message/fusion/mptsas.c
47195@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47196 return 0;
47197 }
47198
47199+static inline void
47200+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47201+{
47202+ if (phy_info->port_details) {
47203+ phy_info->port_details->rphy = rphy;
47204+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47205+ ioc->name, rphy));
47206+ }
47207+
47208+ if (rphy) {
47209+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47210+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47211+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47212+ ioc->name, rphy, rphy->dev.release));
47213+ }
47214+}
47215+
47216 /* no mutex */
47217 static void
47218 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47219@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47220 return NULL;
47221 }
47222
47223-static inline void
47224-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47225-{
47226- if (phy_info->port_details) {
47227- phy_info->port_details->rphy = rphy;
47228- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47229- ioc->name, rphy));
47230- }
47231-
47232- if (rphy) {
47233- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47234- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47235- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47236- ioc->name, rphy, rphy->dev.release));
47237- }
47238-}
47239-
47240 static inline struct sas_port *
47241 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47242 {
47243diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47244index 9a8e185..27ff17d 100644
47245--- a/drivers/mfd/ab8500-debugfs.c
47246+++ b/drivers/mfd/ab8500-debugfs.c
47247@@ -100,7 +100,7 @@ static int irq_last;
47248 static u32 *irq_count;
47249 static int num_irqs;
47250
47251-static struct device_attribute **dev_attr;
47252+static device_attribute_no_const **dev_attr;
47253 static char **event_name;
47254
47255 static u8 avg_sample = SAMPLE_16;
47256diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
47257index 5615522..1eb6f3dc 100644
47258--- a/drivers/mfd/kempld-core.c
47259+++ b/drivers/mfd/kempld-core.c
47260@@ -499,7 +499,7 @@ static struct platform_driver kempld_driver = {
47261 .remove = kempld_remove,
47262 };
47263
47264-static struct dmi_system_id kempld_dmi_table[] __initdata = {
47265+static const struct dmi_system_id kempld_dmi_table[] __initconst = {
47266 {
47267 .ident = "BHL6",
47268 .matches = {
47269diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47270index c880c89..45a7c68 100644
47271--- a/drivers/mfd/max8925-i2c.c
47272+++ b/drivers/mfd/max8925-i2c.c
47273@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47274 const struct i2c_device_id *id)
47275 {
47276 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47277- static struct max8925_chip *chip;
47278+ struct max8925_chip *chip;
47279 struct device_node *node = client->dev.of_node;
47280
47281 if (node && !pdata) {
47282diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47283index 7612d89..70549c2 100644
47284--- a/drivers/mfd/tps65910.c
47285+++ b/drivers/mfd/tps65910.c
47286@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47287 struct tps65910_platform_data *pdata)
47288 {
47289 int ret = 0;
47290- static struct regmap_irq_chip *tps6591x_irqs_chip;
47291+ struct regmap_irq_chip *tps6591x_irqs_chip;
47292
47293 if (!irq) {
47294 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
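
The `static` dropped from these function-local pointers (and from `cfi_staa_writev`'s buffer further down) removes a latent bug: a function-local `static` is a single slot shared by every caller, so two devices probing concurrently would clobber each other's pointer. A plain local gives each call its own storage, as in this sketch with a hypothetical `struct chip`:

#include <linux/device.h>
#include <linux/slab.h>

struct chip { int id; };	/* hypothetical per-device state */

static struct chip *probe_one(struct device *dev)
{
	/* was: static struct chip *chip;  -- shared across calls */
	struct chip *chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);

	return chip;
}
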
47295diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47296index 1b772ef..01e77d33 100644
47297--- a/drivers/mfd/twl4030-irq.c
47298+++ b/drivers/mfd/twl4030-irq.c
47299@@ -34,6 +34,7 @@
47300 #include <linux/of.h>
47301 #include <linux/irqdomain.h>
47302 #include <linux/i2c/twl.h>
47303+#include <asm/pgtable.h>
47304
47305 #include "twl-core.h"
47306
47307@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47308 * Install an irq handler for each of the SIH modules;
47309 * clone dummy irq_chip since PIH can't *do* anything
47310 */
47311- twl4030_irq_chip = dummy_irq_chip;
47312- twl4030_irq_chip.name = "twl4030";
47313+ pax_open_kernel();
47314+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47315+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47316
47317- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47318+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47319+ pax_close_kernel();
47320
47321 for (i = irq_base; i < irq_end; i++) {
47322 irq_set_chip_and_handler(i, &twl4030_irq_chip,
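
`pax_open_kernel()` / `pax_close_kernel()` are PaX primitives, not mainline APIs: they briefly lift the write protection on constified data so boot-time code can patch it, then restore it. The same open/write/close bracket recurs below around `bin_attr_flash_data.size`, `sid_bin_attr.size`, `mmci_ops.card_busy`, the omap/sdhci ops and the Altera TSE netdev ops. A sketch, assuming a hypothetical read-only ops table:

struct ops { void (*ack)(void); };

static void real_ack(void) { }

static const struct ops demo_ops;	/* constified: read-only at run time */

static void patch_ops_at_boot(void)
{
	pax_open_kernel();			/* lift write protection */
	*(void **)&demo_ops.ack = real_ack;	/* write through a
						 * forced-writable pointer */
	pax_close_kernel();			/* restore protection */
}
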
47323diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47324index 464419b..64bae8d 100644
47325--- a/drivers/misc/c2port/core.c
47326+++ b/drivers/misc/c2port/core.c
47327@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47328 goto error_idr_alloc;
47329 c2dev->id = ret;
47330
47331- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47332+ pax_open_kernel();
47333+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47334+ pax_close_kernel();
47335
47336 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47337 "c2port%d", c2dev->id);
47338diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47339index 8385177..2f54635 100644
47340--- a/drivers/misc/eeprom/sunxi_sid.c
47341+++ b/drivers/misc/eeprom/sunxi_sid.c
47342@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47343
47344 platform_set_drvdata(pdev, sid_data);
47345
47346- sid_bin_attr.size = sid_data->keysize;
47347+ pax_open_kernel();
47348+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47349+ pax_close_kernel();
47350 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47351 return -ENODEV;
47352
47353diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47354index 36f5d52..32311c3 100644
47355--- a/drivers/misc/kgdbts.c
47356+++ b/drivers/misc/kgdbts.c
47357@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47358 char before[BREAK_INSTR_SIZE];
47359 char after[BREAK_INSTR_SIZE];
47360
47361- probe_kernel_read(before, (char *)kgdbts_break_test,
47362+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47363 BREAK_INSTR_SIZE);
47364 init_simple_test();
47365 ts.tst = plant_and_detach_test;
47366@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47367 /* Activate test with initial breakpoint */
47368 if (!is_early)
47369 kgdb_breakpoint();
47370- probe_kernel_read(after, (char *)kgdbts_break_test,
47371+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47372 BREAK_INSTR_SIZE);
47373 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47374 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
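
`ktla_ktva()` is a PaX/KERNEXEC helper: on i386 the kernel image is mapped twice, once executable and once as a data alias, and reads of code bytes, as in this kgdb self-test, must go through the alias. Conceptually it is a fixed address translation; a sketch under the assumption of a single arch-specific offset:

/* assumption: 0 stands in for the real delta between the executable
 * mapping (kernel text linear address) and its readable alias */
#define DEMO_TEXT_TO_ALIAS	0UL

static inline void *demo_ktla_ktva(const void *text_addr)
{
	return (char *)text_addr + DEMO_TEXT_TO_ALIAS;
}
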
47375diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47376index 3ef4627..8d00486 100644
47377--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47378+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47379@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47380 * the lid is closed. This leads to interrupts as soon as a little move
47381 * is done.
47382 */
47383- atomic_inc(&lis3->count);
47384+ atomic_inc_unchecked(&lis3->count);
47385
47386 wake_up_interruptible(&lis3->misc_wait);
47387 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47388@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47389 if (lis3->pm_dev)
47390 pm_runtime_get_sync(lis3->pm_dev);
47391
47392- atomic_set(&lis3->count, 0);
47393+ atomic_set_unchecked(&lis3->count, 0);
47394 return 0;
47395 }
47396
47397@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47398 add_wait_queue(&lis3->misc_wait, &wait);
47399 while (true) {
47400 set_current_state(TASK_INTERRUPTIBLE);
47401- data = atomic_xchg(&lis3->count, 0);
47402+ data = atomic_xchg_unchecked(&lis3->count, 0);
47403 if (data)
47404 break;
47405
47406@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47407 struct lis3lv02d, miscdev);
47408
47409 poll_wait(file, &lis3->misc_wait, wait);
47410- if (atomic_read(&lis3->count))
47411+ if (atomic_read_unchecked(&lis3->count))
47412 return POLLIN | POLLRDNORM;
47413 return 0;
47414 }
47415diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47416index c439c82..1f20f57 100644
47417--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47418+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47419@@ -297,7 +297,7 @@ struct lis3lv02d {
47420 struct input_polled_dev *idev; /* input device */
47421 struct platform_device *pdev; /* platform device */
47422 struct regulator_bulk_data regulators[2];
47423- atomic_t count; /* interrupt count after last read */
47424+ atomic_unchecked_t count; /* interrupt count after last read */
47425 union axis_conversion ac; /* hw -> logical axis */
47426 int mapped_btns[3];
47427
47428diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47429index 2f30bad..c4c13d0 100644
47430--- a/drivers/misc/sgi-gru/gruhandles.c
47431+++ b/drivers/misc/sgi-gru/gruhandles.c
47432@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47433 unsigned long nsec;
47434
47435 nsec = CLKS2NSEC(clks);
47436- atomic_long_inc(&mcs_op_statistics[op].count);
47437- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47438+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47439+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47440 if (mcs_op_statistics[op].max < nsec)
47441 mcs_op_statistics[op].max = nsec;
47442 }
47443diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47444index 4f76359..cdfcb2e 100644
47445--- a/drivers/misc/sgi-gru/gruprocfs.c
47446+++ b/drivers/misc/sgi-gru/gruprocfs.c
47447@@ -32,9 +32,9 @@
47448
47449 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47450
47451-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47452+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47453 {
47454- unsigned long val = atomic_long_read(v);
47455+ unsigned long val = atomic_long_read_unchecked(v);
47456
47457 seq_printf(s, "%16lu %s\n", val, id);
47458 }
47459@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47460
47461 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47462 for (op = 0; op < mcsop_last; op++) {
47463- count = atomic_long_read(&mcs_op_statistics[op].count);
47464- total = atomic_long_read(&mcs_op_statistics[op].total);
47465+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47466+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47467 max = mcs_op_statistics[op].max;
47468 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47469 count ? total / count : 0, max);
47470diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47471index 5c3ce24..4915ccb 100644
47472--- a/drivers/misc/sgi-gru/grutables.h
47473+++ b/drivers/misc/sgi-gru/grutables.h
47474@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47475 * GRU statistics.
47476 */
47477 struct gru_stats_s {
47478- atomic_long_t vdata_alloc;
47479- atomic_long_t vdata_free;
47480- atomic_long_t gts_alloc;
47481- atomic_long_t gts_free;
47482- atomic_long_t gms_alloc;
47483- atomic_long_t gms_free;
47484- atomic_long_t gts_double_allocate;
47485- atomic_long_t assign_context;
47486- atomic_long_t assign_context_failed;
47487- atomic_long_t free_context;
47488- atomic_long_t load_user_context;
47489- atomic_long_t load_kernel_context;
47490- atomic_long_t lock_kernel_context;
47491- atomic_long_t unlock_kernel_context;
47492- atomic_long_t steal_user_context;
47493- atomic_long_t steal_kernel_context;
47494- atomic_long_t steal_context_failed;
47495- atomic_long_t nopfn;
47496- atomic_long_t asid_new;
47497- atomic_long_t asid_next;
47498- atomic_long_t asid_wrap;
47499- atomic_long_t asid_reuse;
47500- atomic_long_t intr;
47501- atomic_long_t intr_cbr;
47502- atomic_long_t intr_tfh;
47503- atomic_long_t intr_spurious;
47504- atomic_long_t intr_mm_lock_failed;
47505- atomic_long_t call_os;
47506- atomic_long_t call_os_wait_queue;
47507- atomic_long_t user_flush_tlb;
47508- atomic_long_t user_unload_context;
47509- atomic_long_t user_exception;
47510- atomic_long_t set_context_option;
47511- atomic_long_t check_context_retarget_intr;
47512- atomic_long_t check_context_unload;
47513- atomic_long_t tlb_dropin;
47514- atomic_long_t tlb_preload_page;
47515- atomic_long_t tlb_dropin_fail_no_asid;
47516- atomic_long_t tlb_dropin_fail_upm;
47517- atomic_long_t tlb_dropin_fail_invalid;
47518- atomic_long_t tlb_dropin_fail_range_active;
47519- atomic_long_t tlb_dropin_fail_idle;
47520- atomic_long_t tlb_dropin_fail_fmm;
47521- atomic_long_t tlb_dropin_fail_no_exception;
47522- atomic_long_t tfh_stale_on_fault;
47523- atomic_long_t mmu_invalidate_range;
47524- atomic_long_t mmu_invalidate_page;
47525- atomic_long_t flush_tlb;
47526- atomic_long_t flush_tlb_gru;
47527- atomic_long_t flush_tlb_gru_tgh;
47528- atomic_long_t flush_tlb_gru_zero_asid;
47529+ atomic_long_unchecked_t vdata_alloc;
47530+ atomic_long_unchecked_t vdata_free;
47531+ atomic_long_unchecked_t gts_alloc;
47532+ atomic_long_unchecked_t gts_free;
47533+ atomic_long_unchecked_t gms_alloc;
47534+ atomic_long_unchecked_t gms_free;
47535+ atomic_long_unchecked_t gts_double_allocate;
47536+ atomic_long_unchecked_t assign_context;
47537+ atomic_long_unchecked_t assign_context_failed;
47538+ atomic_long_unchecked_t free_context;
47539+ atomic_long_unchecked_t load_user_context;
47540+ atomic_long_unchecked_t load_kernel_context;
47541+ atomic_long_unchecked_t lock_kernel_context;
47542+ atomic_long_unchecked_t unlock_kernel_context;
47543+ atomic_long_unchecked_t steal_user_context;
47544+ atomic_long_unchecked_t steal_kernel_context;
47545+ atomic_long_unchecked_t steal_context_failed;
47546+ atomic_long_unchecked_t nopfn;
47547+ atomic_long_unchecked_t asid_new;
47548+ atomic_long_unchecked_t asid_next;
47549+ atomic_long_unchecked_t asid_wrap;
47550+ atomic_long_unchecked_t asid_reuse;
47551+ atomic_long_unchecked_t intr;
47552+ atomic_long_unchecked_t intr_cbr;
47553+ atomic_long_unchecked_t intr_tfh;
47554+ atomic_long_unchecked_t intr_spurious;
47555+ atomic_long_unchecked_t intr_mm_lock_failed;
47556+ atomic_long_unchecked_t call_os;
47557+ atomic_long_unchecked_t call_os_wait_queue;
47558+ atomic_long_unchecked_t user_flush_tlb;
47559+ atomic_long_unchecked_t user_unload_context;
47560+ atomic_long_unchecked_t user_exception;
47561+ atomic_long_unchecked_t set_context_option;
47562+ atomic_long_unchecked_t check_context_retarget_intr;
47563+ atomic_long_unchecked_t check_context_unload;
47564+ atomic_long_unchecked_t tlb_dropin;
47565+ atomic_long_unchecked_t tlb_preload_page;
47566+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47567+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47568+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47569+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47570+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47571+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47572+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47573+ atomic_long_unchecked_t tfh_stale_on_fault;
47574+ atomic_long_unchecked_t mmu_invalidate_range;
47575+ atomic_long_unchecked_t mmu_invalidate_page;
47576+ atomic_long_unchecked_t flush_tlb;
47577+ atomic_long_unchecked_t flush_tlb_gru;
47578+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47579+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47580
47581- atomic_long_t copy_gpa;
47582- atomic_long_t read_gpa;
47583+ atomic_long_unchecked_t copy_gpa;
47584+ atomic_long_unchecked_t read_gpa;
47585
47586- atomic_long_t mesq_receive;
47587- atomic_long_t mesq_receive_none;
47588- atomic_long_t mesq_send;
47589- atomic_long_t mesq_send_failed;
47590- atomic_long_t mesq_noop;
47591- atomic_long_t mesq_send_unexpected_error;
47592- atomic_long_t mesq_send_lb_overflow;
47593- atomic_long_t mesq_send_qlimit_reached;
47594- atomic_long_t mesq_send_amo_nacked;
47595- atomic_long_t mesq_send_put_nacked;
47596- atomic_long_t mesq_page_overflow;
47597- atomic_long_t mesq_qf_locked;
47598- atomic_long_t mesq_qf_noop_not_full;
47599- atomic_long_t mesq_qf_switch_head_failed;
47600- atomic_long_t mesq_qf_unexpected_error;
47601- atomic_long_t mesq_noop_unexpected_error;
47602- atomic_long_t mesq_noop_lb_overflow;
47603- atomic_long_t mesq_noop_qlimit_reached;
47604- atomic_long_t mesq_noop_amo_nacked;
47605- atomic_long_t mesq_noop_put_nacked;
47606- atomic_long_t mesq_noop_page_overflow;
47607+ atomic_long_unchecked_t mesq_receive;
47608+ atomic_long_unchecked_t mesq_receive_none;
47609+ atomic_long_unchecked_t mesq_send;
47610+ atomic_long_unchecked_t mesq_send_failed;
47611+ atomic_long_unchecked_t mesq_noop;
47612+ atomic_long_unchecked_t mesq_send_unexpected_error;
47613+ atomic_long_unchecked_t mesq_send_lb_overflow;
47614+ atomic_long_unchecked_t mesq_send_qlimit_reached;
47615+ atomic_long_unchecked_t mesq_send_amo_nacked;
47616+ atomic_long_unchecked_t mesq_send_put_nacked;
47617+ atomic_long_unchecked_t mesq_page_overflow;
47618+ atomic_long_unchecked_t mesq_qf_locked;
47619+ atomic_long_unchecked_t mesq_qf_noop_not_full;
47620+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
47621+ atomic_long_unchecked_t mesq_qf_unexpected_error;
47622+ atomic_long_unchecked_t mesq_noop_unexpected_error;
47623+ atomic_long_unchecked_t mesq_noop_lb_overflow;
47624+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
47625+ atomic_long_unchecked_t mesq_noop_amo_nacked;
47626+ atomic_long_unchecked_t mesq_noop_put_nacked;
47627+ atomic_long_unchecked_t mesq_noop_page_overflow;
47628
47629 };
47630
47631@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47632 tghop_invalidate, mcsop_last};
47633
47634 struct mcs_op_statistic {
47635- atomic_long_t count;
47636- atomic_long_t total;
47637+ atomic_long_unchecked_t count;
47638+ atomic_long_unchecked_t total;
47639 unsigned long max;
47640 };
47641
47642@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47643
47644 #define STAT(id) do { \
47645 if (gru_options & OPT_STATS) \
47646- atomic_long_inc(&gru_stats.id); \
47647+ atomic_long_inc_unchecked(&gru_stats.id); \
47648 } while (0)
47649
47650 #ifdef CONFIG_SGI_GRU_DEBUG
47651diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47652index c862cd4..0d176fe 100644
47653--- a/drivers/misc/sgi-xp/xp.h
47654+++ b/drivers/misc/sgi-xp/xp.h
47655@@ -288,7 +288,7 @@ struct xpc_interface {
47656 xpc_notify_func, void *);
47657 void (*received) (short, int, void *);
47658 enum xp_retval (*partid_to_nasids) (short, void *);
47659-};
47660+} __no_const;
47661
47662 extern struct xpc_interface xpc_interface;
47663
47664diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47665index 01be66d..e3a0c7e 100644
47666--- a/drivers/misc/sgi-xp/xp_main.c
47667+++ b/drivers/misc/sgi-xp/xp_main.c
47668@@ -78,13 +78,13 @@ xpc_notloaded(void)
47669 }
47670
47671 struct xpc_interface xpc_interface = {
47672- (void (*)(int))xpc_notloaded,
47673- (void (*)(int))xpc_notloaded,
47674- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47675- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47676+ .connect = (void (*)(int))xpc_notloaded,
47677+ .disconnect = (void (*)(int))xpc_notloaded,
47678+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47679+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47680 void *))xpc_notloaded,
47681- (void (*)(short, int, void *))xpc_notloaded,
47682- (enum xp_retval(*)(short, void *))xpc_notloaded
47683+ .received = (void (*)(short, int, void *))xpc_notloaded,
47684+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47685 };
47686 EXPORT_SYMBOL_GPL(xpc_interface);
47687
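
Switching `xpc_interface` from positional to designated initializers is C99 hygiene independent of grsecurity: positional initializers silently misbind when fields are reordered or inserted, while named ones stay correct and self-documenting. Minimal illustration:

struct iface {
	void (*connect)(int);
	void (*disconnect)(int);
};

static void stub(int ch) { (void)ch; }

static const struct iface demo_iface = {
	.connect	= stub,
	.disconnect	= stub,
};
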
47688diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
47689index b94d5f7..7f494c5 100644
47690--- a/drivers/misc/sgi-xp/xpc.h
47691+++ b/drivers/misc/sgi-xp/xpc.h
47692@@ -835,6 +835,7 @@ struct xpc_arch_operations {
47693 void (*received_payload) (struct xpc_channel *, void *);
47694 void (*notify_senders_of_disconnect) (struct xpc_channel *);
47695 };
47696+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
47697
47698 /* struct xpc_partition act_state values (for XPC HB) */
47699
47700@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
47701 /* found in xpc_main.c */
47702 extern struct device *xpc_part;
47703 extern struct device *xpc_chan;
47704-extern struct xpc_arch_operations xpc_arch_ops;
47705+extern xpc_arch_operations_no_const xpc_arch_ops;
47706 extern int xpc_disengage_timelimit;
47707 extern int xpc_disengage_timedout;
47708 extern int xpc_activate_IRQ_rcvd;
47709diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
47710index 82dc574..8539ab2 100644
47711--- a/drivers/misc/sgi-xp/xpc_main.c
47712+++ b/drivers/misc/sgi-xp/xpc_main.c
47713@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
47714 .notifier_call = xpc_system_die,
47715 };
47716
47717-struct xpc_arch_operations xpc_arch_ops;
47718+xpc_arch_operations_no_const xpc_arch_ops;
47719
47720 /*
47721 * Timer function to enforce the timelimit on the partition disengage.
47722@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
47723
47724 if (((die_args->trapnr == X86_TRAP_MF) ||
47725 (die_args->trapnr == X86_TRAP_XF)) &&
47726- !user_mode_vm(die_args->regs))
47727+ !user_mode(die_args->regs))
47728 xpc_die_deactivate();
47729
47730 break;
47731diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
47732index ed2e71a..54c498e 100644
47733--- a/drivers/mmc/card/block.c
47734+++ b/drivers/mmc/card/block.c
47735@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
47736 if (idata->ic.postsleep_min_us)
47737 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
47738
47739- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
47740+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
47741 err = -EFAULT;
47742 goto cmd_rel_host;
47743 }
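
The mmc_blk_ioctl_cmd change is a type correction rather than a behaviour change: for an array member, `&(ic_ptr->response)` and `ic_ptr->response` are the same address but different types, pointer-to-array versus the decayed element pointer that `copy_to_user()` expects. Sketched with kernel types:

#include <linux/string.h>
#include <linux/types.h>

struct demo_cmd { u32 resp[4]; };

static void copy_resp(u32 *dst, const struct demo_cmd *c)
{
	/* &c->resp has type u32 (*)[4]; c->resp decays to u32 *,
	 * and both name the same bytes */
	memcpy(dst, c->resp, sizeof(c->resp));
}
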
47744diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
47745index 18c4afe..43be71e 100644
47746--- a/drivers/mmc/host/dw_mmc.h
47747+++ b/drivers/mmc/host/dw_mmc.h
47748@@ -271,5 +271,5 @@ struct dw_mci_drv_data {
47749 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
47750 int (*parse_dt)(struct dw_mci *host);
47751 int (*execute_tuning)(struct dw_mci_slot *slot);
47752-};
47753+} __do_const;
47754 #endif /* _DW_MMC_H_ */
47755diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
47756index 7fe1619..ae0781b 100644
47757--- a/drivers/mmc/host/mmci.c
47758+++ b/drivers/mmc/host/mmci.c
47759@@ -1630,7 +1630,9 @@ static int mmci_probe(struct amba_device *dev,
47760 mmc->caps |= MMC_CAP_CMD23;
47761
47762 if (variant->busy_detect) {
47763- mmci_ops.card_busy = mmci_card_busy;
47764+ pax_open_kernel();
47765+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
47766+ pax_close_kernel();
47767 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
47768 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
47769 mmc->max_busy_timeout = 0;
47770diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
47771index f84cfb0..aebe5d6 100644
47772--- a/drivers/mmc/host/omap_hsmmc.c
47773+++ b/drivers/mmc/host/omap_hsmmc.c
47774@@ -2054,7 +2054,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
47775
47776 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
47777 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
47778- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
47779+ pax_open_kernel();
47780+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
47781+ pax_close_kernel();
47782 }
47783
47784 pm_runtime_enable(host->dev);
47785diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
47786index 10ef824..88461a2 100644
47787--- a/drivers/mmc/host/sdhci-esdhc-imx.c
47788+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
47789@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
47790 host->mmc->caps |= MMC_CAP_1_8V_DDR;
47791 }
47792
47793- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
47794- sdhci_esdhc_ops.platform_execute_tuning =
47795+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
47796+ pax_open_kernel();
47797+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
47798 esdhc_executing_tuning;
47799+ pax_close_kernel();
47800+ }
47801
47802 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
47803 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
47804diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
47805index c6d2dd7..81b1ca3 100644
47806--- a/drivers/mmc/host/sdhci-s3c.c
47807+++ b/drivers/mmc/host/sdhci-s3c.c
47808@@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
47809 * we can use overriding functions instead of default.
47810 */
47811 if (sc->no_divider) {
47812- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47813- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47814- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47815+ pax_open_kernel();
47816+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47817+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47818+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47819+ pax_close_kernel();
47820 }
47821
47822 /* It supports additional host capabilities if needed */
47823diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
47824index 423666b..81ff5eb 100644
47825--- a/drivers/mtd/chips/cfi_cmdset_0020.c
47826+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
47827@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
47828 size_t totlen = 0, thislen;
47829 int ret = 0;
47830 size_t buflen = 0;
47831- static char *buffer;
47832+ char *buffer;
47833
47834 if (!ECCBUF_SIZE) {
47835 /* We should fall back to a general writev implementation.
47836diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
47837index f44c606..aa4e804 100644
47838--- a/drivers/mtd/nand/denali.c
47839+++ b/drivers/mtd/nand/denali.c
47840@@ -24,6 +24,7 @@
47841 #include <linux/slab.h>
47842 #include <linux/mtd/mtd.h>
47843 #include <linux/module.h>
47844+#include <linux/slab.h>
47845
47846 #include "denali.h"
47847
47848diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47849index 33f3c3c..d6bbe6a 100644
47850--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47851+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47852@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
47853
47854 /* first try to map the upper buffer directly */
47855 if (virt_addr_valid(this->upper_buf) &&
47856- !object_is_on_stack(this->upper_buf)) {
47857+ !object_starts_on_stack(this->upper_buf)) {
47858 sg_init_one(sgl, this->upper_buf, this->upper_len);
47859 ret = dma_map_sg(this->dev, sgl, 1, dr);
47860 if (ret == 0)
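
The rename to `object_starts_on_stack()` narrows the claim to what the test can actually establish: it looks only at the buffer's start address before deciding whether the upper buffer may be DMA-mapped directly. A sketch of such a start-address check, assuming the classic thread-stack layout:

#include <linux/sched.h>	/* current, task_stack_page() */
#include <linux/types.h>

static bool demo_starts_on_stack(const void *obj)
{
	void *stack = task_stack_page(current);

	return obj >= stack && obj < stack + THREAD_SIZE;
}
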
47861diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
47862index a5dfbfb..8042ab4 100644
47863--- a/drivers/mtd/nftlmount.c
47864+++ b/drivers/mtd/nftlmount.c
47865@@ -24,6 +24,7 @@
47866 #include <asm/errno.h>
47867 #include <linux/delay.h>
47868 #include <linux/slab.h>
47869+#include <linux/sched.h>
47870 #include <linux/mtd/mtd.h>
47871 #include <linux/mtd/nand.h>
47872 #include <linux/mtd/nftl.h>
47873diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
47874index c23184a..4115c41 100644
47875--- a/drivers/mtd/sm_ftl.c
47876+++ b/drivers/mtd/sm_ftl.c
47877@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47878 #define SM_CIS_VENDOR_OFFSET 0x59
47879 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47880 {
47881- struct attribute_group *attr_group;
47882+ attribute_group_no_const *attr_group;
47883 struct attribute **attributes;
47884 struct sm_sysfs_attribute *vendor_attribute;
47885 char *vendor;
47886diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47887index 7b11243..b3278a3 100644
47888--- a/drivers/net/bonding/bond_netlink.c
47889+++ b/drivers/net/bonding/bond_netlink.c
47890@@ -585,7 +585,7 @@ nla_put_failure:
47891 return -EMSGSIZE;
47892 }
47893
47894-struct rtnl_link_ops bond_link_ops __read_mostly = {
47895+struct rtnl_link_ops bond_link_ops = {
47896 .kind = "bond",
47897 .priv_size = sizeof(struct bonding),
47898 .setup = bond_setup,
47899diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
47900index b3b922a..80bba38 100644
47901--- a/drivers/net/caif/caif_hsi.c
47902+++ b/drivers/net/caif/caif_hsi.c
47903@@ -1444,7 +1444,7 @@ err:
47904 return -ENODEV;
47905 }
47906
47907-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
47908+static struct rtnl_link_ops caif_hsi_link_ops = {
47909 .kind = "cfhsi",
47910 .priv_size = sizeof(struct cfhsi),
47911 .setup = cfhsi_setup,
47912diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47913index 58808f65..0bdc7b3 100644
47914--- a/drivers/net/can/Kconfig
47915+++ b/drivers/net/can/Kconfig
47916@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47917
47918 config CAN_FLEXCAN
47919 tristate "Support for Freescale FLEXCAN based chips"
47920- depends on ARM || PPC
47921+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47922 ---help---
47923 Say Y here if you want to support for Freescale FlexCAN.
47924
47925diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
47926index b0f6924..59e9640 100644
47927--- a/drivers/net/can/dev.c
47928+++ b/drivers/net/can/dev.c
47929@@ -959,7 +959,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
47930 return -EOPNOTSUPP;
47931 }
47932
47933-static struct rtnl_link_ops can_link_ops __read_mostly = {
47934+static struct rtnl_link_ops can_link_ops = {
47935 .kind = "can",
47936 .maxtype = IFLA_CAN_MAX,
47937 .policy = can_policy,
47938diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
47939index 674f367..ec3a31f 100644
47940--- a/drivers/net/can/vcan.c
47941+++ b/drivers/net/can/vcan.c
47942@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
47943 dev->destructor = free_netdev;
47944 }
47945
47946-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
47947+static struct rtnl_link_ops vcan_link_ops = {
47948 .kind = "vcan",
47949 .setup = vcan_setup,
47950 };
47951diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
47952index 49adbf1..fff7ff8 100644
47953--- a/drivers/net/dummy.c
47954+++ b/drivers/net/dummy.c
47955@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
47956 return 0;
47957 }
47958
47959-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47960+static struct rtnl_link_ops dummy_link_ops = {
47961 .kind = DRV_NAME,
47962 .setup = dummy_setup,
47963 .validate = dummy_validate,
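
The `__read_mostly` dropped from these `rtnl_link_ops` (bonding, caif_hsi, can, vcan, dummy) is presumably constify fallout: `__read_mostly` pins an object into `.data..read_mostly`, which stays writable, whereas without the annotation the plugin is free to move the effectively const ops into write-protected storage. The section difference in miniature:

#include <linux/cache.h>	/* __read_mostly */

static int demo_counter __read_mostly;	/* .data..read_mostly, writable */
static const int demo_limit = 16;	/* .rodata, write-protected */
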
47964diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47965index 0443654..4f0aa18 100644
47966--- a/drivers/net/ethernet/8390/ax88796.c
47967+++ b/drivers/net/ethernet/8390/ax88796.c
47968@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47969 if (ax->plat->reg_offsets)
47970 ei_local->reg_offset = ax->plat->reg_offsets;
47971 else {
47972+ resource_size_t _mem_size = mem_size;
47973+ do_div(_mem_size, 0x18);
47974 ei_local->reg_offset = ax->reg_offsets;
47975 for (ret = 0; ret < 0x18; ret++)
47976- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47977+ ax->reg_offsets[ret] = _mem_size * ret;
47978 }
47979
47980 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
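
The ax88796 change sidesteps a 64-bit division: `resource_size_t` can be 64 bits even on 32-bit ARM, and `mem_size / 0x18` would then reference libgcc's `__udivdi3`, which the kernel does not link. `do_div()` divides a u64 by a u32 in place and returns the remainder:

#include <asm/div64.h>		/* do_div() */
#include <linux/types.h>

static u32 reg_stride(u64 mem_size)
{
	u32 rem = do_div(mem_size, 0x18);	/* mem_size /= 0x18 */

	(void)rem;		/* remainder unused here */
	return (u32)mem_size;	/* per-register offset step */
}
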
47981diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47982index 6725dc0..163549c 100644
47983--- a/drivers/net/ethernet/altera/altera_tse_main.c
47984+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47985@@ -1216,7 +1216,7 @@ static int tse_shutdown(struct net_device *dev)
47986 return 0;
47987 }
47988
47989-static struct net_device_ops altera_tse_netdev_ops = {
47990+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47991 .ndo_open = tse_open,
47992 .ndo_stop = tse_shutdown,
47993 .ndo_start_xmit = tse_start_xmit,
47994@@ -1453,11 +1453,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47995 ndev->netdev_ops = &altera_tse_netdev_ops;
47996 altera_tse_set_ethtool_ops(ndev);
47997
47998+ pax_open_kernel();
47999 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48000
48001 if (priv->hash_filter)
48002 altera_tse_netdev_ops.ndo_set_rx_mode =
48003 tse_set_rx_mode_hashfilter;
48004+ pax_close_kernel();
48005
48006 /* Scatter/gather IO is not supported,
48007 * so it is turned off
48008diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48009index 29a0927..5a348e24 100644
48010--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48011+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48012@@ -1122,14 +1122,14 @@ do { \
48013 * operations, everything works on mask values.
48014 */
48015 #define XMDIO_READ(_pdata, _mmd, _reg) \
48016- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48017+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48018 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48019
48020 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48021 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48022
48023 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48024- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48025+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48026 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48027
48028 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48029diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48030index 8a50b01..39c1ad0 100644
48031--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48032+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48033@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
48034
48035 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
48036
48037- pdata->hw_if.config_dcb_tc(pdata);
48038+ pdata->hw_if->config_dcb_tc(pdata);
48039
48040 return 0;
48041 }
48042@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
48043
48044 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
48045
48046- pdata->hw_if.config_dcb_pfc(pdata);
48047+ pdata->hw_if->config_dcb_pfc(pdata);
48048
48049 return 0;
48050 }
48051diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48052index d81fc6b..6f8ab25 100644
48053--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48054+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48055@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
48056
48057 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48058 {
48059- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48060+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48061 struct xgbe_channel *channel;
48062 struct xgbe_ring *ring;
48063 struct xgbe_ring_data *rdata;
48064@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48065
48066 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48067 {
48068- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48069+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48070 struct xgbe_channel *channel;
48071 struct xgbe_ring *ring;
48072 struct xgbe_ring_desc *rdesc;
48073@@ -620,17 +620,12 @@ err_out:
48074 return 0;
48075 }
48076
48077-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48078-{
48079- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48080-
48081- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48082- desc_if->free_ring_resources = xgbe_free_ring_resources;
48083- desc_if->map_tx_skb = xgbe_map_tx_skb;
48084- desc_if->map_rx_buffer = xgbe_map_rx_buffer;
48085- desc_if->unmap_rdata = xgbe_unmap_rdata;
48086- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48087- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48088-
48089- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48090-}
48091+const struct xgbe_desc_if default_xgbe_desc_if = {
48092+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48093+ .free_ring_resources = xgbe_free_ring_resources,
48094+ .map_tx_skb = xgbe_map_tx_skb,
48095+ .map_rx_buffer = xgbe_map_rx_buffer,
48096+ .unmap_rdata = xgbe_unmap_rdata,
48097+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48098+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48099+};
48100diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48101index 400757b..d8c53f6 100644
48102--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48103+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48104@@ -2748,7 +2748,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48105
48106 static int xgbe_init(struct xgbe_prv_data *pdata)
48107 {
48108- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48109+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48110 int ret;
48111
48112 DBGPR("-->xgbe_init\n");
48113@@ -2813,108 +2813,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48114 return 0;
48115 }
48116
48117-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48118-{
48119- DBGPR("-->xgbe_init_function_ptrs\n");
48120-
48121- hw_if->tx_complete = xgbe_tx_complete;
48122-
48123- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48124- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48125- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
48126- hw_if->set_mac_address = xgbe_set_mac_address;
48127-
48128- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48129- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48130-
48131- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48132- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48133- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
48134- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
48135- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
48136-
48137- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48138- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48139-
48140- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48141- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48142- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48143-
48144- hw_if->enable_tx = xgbe_enable_tx;
48145- hw_if->disable_tx = xgbe_disable_tx;
48146- hw_if->enable_rx = xgbe_enable_rx;
48147- hw_if->disable_rx = xgbe_disable_rx;
48148-
48149- hw_if->powerup_tx = xgbe_powerup_tx;
48150- hw_if->powerdown_tx = xgbe_powerdown_tx;
48151- hw_if->powerup_rx = xgbe_powerup_rx;
48152- hw_if->powerdown_rx = xgbe_powerdown_rx;
48153-
48154- hw_if->dev_xmit = xgbe_dev_xmit;
48155- hw_if->dev_read = xgbe_dev_read;
48156- hw_if->enable_int = xgbe_enable_int;
48157- hw_if->disable_int = xgbe_disable_int;
48158- hw_if->init = xgbe_init;
48159- hw_if->exit = xgbe_exit;
48160+const struct xgbe_hw_if default_xgbe_hw_if = {
48161+ .tx_complete = xgbe_tx_complete,
48162+
48163+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48164+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48165+ .add_mac_addresses = xgbe_add_mac_addresses,
48166+ .set_mac_address = xgbe_set_mac_address,
48167+
48168+ .enable_rx_csum = xgbe_enable_rx_csum,
48169+ .disable_rx_csum = xgbe_disable_rx_csum,
48170+
48171+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48172+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48173+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
48174+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
48175+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
48176+
48177+ .read_mmd_regs = xgbe_read_mmd_regs,
48178+ .write_mmd_regs = xgbe_write_mmd_regs,
48179+
48180+ .set_gmii_speed = xgbe_set_gmii_speed,
48181+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48182+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48183+
48184+ .enable_tx = xgbe_enable_tx,
48185+ .disable_tx = xgbe_disable_tx,
48186+ .enable_rx = xgbe_enable_rx,
48187+ .disable_rx = xgbe_disable_rx,
48188+
48189+ .powerup_tx = xgbe_powerup_tx,
48190+ .powerdown_tx = xgbe_powerdown_tx,
48191+ .powerup_rx = xgbe_powerup_rx,
48192+ .powerdown_rx = xgbe_powerdown_rx,
48193+
48194+ .dev_xmit = xgbe_dev_xmit,
48195+ .dev_read = xgbe_dev_read,
48196+ .enable_int = xgbe_enable_int,
48197+ .disable_int = xgbe_disable_int,
48198+ .init = xgbe_init,
48199+ .exit = xgbe_exit,
48200
48201 /* Descriptor related Sequences have to be initialized here */
48202- hw_if->tx_desc_init = xgbe_tx_desc_init;
48203- hw_if->rx_desc_init = xgbe_rx_desc_init;
48204- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48205- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48206- hw_if->is_last_desc = xgbe_is_last_desc;
48207- hw_if->is_context_desc = xgbe_is_context_desc;
48208- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
48209+ .tx_desc_init = xgbe_tx_desc_init,
48210+ .rx_desc_init = xgbe_rx_desc_init,
48211+ .tx_desc_reset = xgbe_tx_desc_reset,
48212+ .rx_desc_reset = xgbe_rx_desc_reset,
48213+ .is_last_desc = xgbe_is_last_desc,
48214+ .is_context_desc = xgbe_is_context_desc,
48215+ .tx_start_xmit = xgbe_tx_start_xmit,
48216
48217 /* For FLOW ctrl */
48218- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48219- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48220+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48221+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48222
48223 /* For RX coalescing */
48224- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48225- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48226- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48227- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48228+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48229+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48230+ .usec_to_riwt = xgbe_usec_to_riwt,
48231+ .riwt_to_usec = xgbe_riwt_to_usec,
48232
48233 /* For RX and TX threshold config */
48234- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48235- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48236+ .config_rx_threshold = xgbe_config_rx_threshold,
48237+ .config_tx_threshold = xgbe_config_tx_threshold,
48238
48239 /* For RX and TX Store and Forward Mode config */
48240- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48241- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48242+ .config_rsf_mode = xgbe_config_rsf_mode,
48243+ .config_tsf_mode = xgbe_config_tsf_mode,
48244
48245 /* For TX DMA Operating on Second Frame config */
48246- hw_if->config_osp_mode = xgbe_config_osp_mode;
48247+ .config_osp_mode = xgbe_config_osp_mode,
48248
48249 /* For RX and TX PBL config */
48250- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48251- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48252- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48253- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48254- hw_if->config_pblx8 = xgbe_config_pblx8;
48255+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48256+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48257+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48258+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48259+ .config_pblx8 = xgbe_config_pblx8,
48260
48261 /* For MMC statistics support */
48262- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48263- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48264- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48265+ .tx_mmc_int = xgbe_tx_mmc_int,
48266+ .rx_mmc_int = xgbe_rx_mmc_int,
48267+ .read_mmc_stats = xgbe_read_mmc_stats,
48268
48269 /* For PTP config */
48270- hw_if->config_tstamp = xgbe_config_tstamp;
48271- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
48272- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
48273- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
48274- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
48275+ .config_tstamp = xgbe_config_tstamp,
48276+ .update_tstamp_addend = xgbe_update_tstamp_addend,
48277+ .set_tstamp_time = xgbe_set_tstamp_time,
48278+ .get_tstamp_time = xgbe_get_tstamp_time,
48279+ .get_tx_tstamp = xgbe_get_tx_tstamp,
48280
48281 /* For Data Center Bridging config */
48282- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
48283- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
48284+ .config_dcb_tc = xgbe_config_dcb_tc,
48285+ .config_dcb_pfc = xgbe_config_dcb_pfc,
48286
48287 /* For Receive Side Scaling */
48288- hw_if->enable_rss = xgbe_enable_rss;
48289- hw_if->disable_rss = xgbe_disable_rss;
48290- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
48291- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
48292-
48293- DBGPR("<--xgbe_init_function_ptrs\n");
48294-}
48295+ .enable_rss = xgbe_enable_rss,
48296+ .disable_rss = xgbe_disable_rss,
48297+ .set_rss_hash_key = xgbe_set_rss_hash_key,
48298+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
48299+};
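
This xgbe rework, together with the matching xgbe-desc.c hunk above, replaces a writable per-device copy of the function-pointer table with one shared const table referenced through a pointer, deleting the `xgbe_init_function_ptrs_*` helpers outright; the follow-on hunks then convert every call site from `pdata->hw_if.op()` to `pdata->hw_if->op()`. The shape of the conversion, sketched:

struct demo_hw_if {
	int (*init)(void);
};

static int demo_init(void) { return 0; }

/* one shared, read-only table instead of a writable copy per device */
static const struct demo_hw_if default_demo_hw_if = {
	.init = demo_init,
};

struct demo_prv_data {
	const struct demo_hw_if *hw_if;	/* was: struct demo_hw_if hw_if; */
};

static int demo_start(struct demo_prv_data *pdata)
{
	return pdata->hw_if->init();	/* was: pdata->hw_if.init() */
}
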
48300diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48301index 885b02b..4b31a4c 100644
48302--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48303+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48304@@ -244,7 +244,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
48305 * support, tell it now
48306 */
48307 if (ring->tx.xmit_more)
48308- pdata->hw_if.tx_start_xmit(channel, ring);
48309+ pdata->hw_if->tx_start_xmit(channel, ring);
48310
48311 return NETDEV_TX_BUSY;
48312 }
48313@@ -272,7 +272,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48314
48315 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48316 {
48317- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48318+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48319 struct xgbe_channel *channel;
48320 enum xgbe_int int_id;
48321 unsigned int i;
48322@@ -294,7 +294,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48323
48324 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48325 {
48326- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48327+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48328 struct xgbe_channel *channel;
48329 enum xgbe_int int_id;
48330 unsigned int i;
48331@@ -317,7 +317,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48332 static irqreturn_t xgbe_isr(int irq, void *data)
48333 {
48334 struct xgbe_prv_data *pdata = data;
48335- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48336+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48337 struct xgbe_channel *channel;
48338 unsigned int dma_isr, dma_ch_isr;
48339 unsigned int mac_isr, mac_tssr;
48340@@ -673,7 +673,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
48341
48342 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48343 {
48344- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48345+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48346
48347 DBGPR("-->xgbe_init_tx_coalesce\n");
48348
48349@@ -687,7 +687,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48350
48351 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48352 {
48353- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48354+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48355
48356 DBGPR("-->xgbe_init_rx_coalesce\n");
48357
48358@@ -701,7 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48359
48360 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48361 {
48362- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48363+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48364 struct xgbe_channel *channel;
48365 struct xgbe_ring *ring;
48366 struct xgbe_ring_data *rdata;
48367@@ -726,7 +726,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48368
48369 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48370 {
48371- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48372+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48373 struct xgbe_channel *channel;
48374 struct xgbe_ring *ring;
48375 struct xgbe_ring_data *rdata;
48376@@ -752,7 +752,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48377 static void xgbe_adjust_link(struct net_device *netdev)
48378 {
48379 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48380- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48381+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48382 struct phy_device *phydev = pdata->phydev;
48383 int new_state = 0;
48384
48385@@ -860,7 +860,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
48386 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48387 {
48388 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48389- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48390+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48391 unsigned long flags;
48392
48393 DBGPR("-->xgbe_powerdown\n");
48394@@ -898,7 +898,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48395 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48396 {
48397 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48398- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48399+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48400 unsigned long flags;
48401
48402 DBGPR("-->xgbe_powerup\n");
48403@@ -935,7 +935,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48404
48405 static int xgbe_start(struct xgbe_prv_data *pdata)
48406 {
48407- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48408+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48409 struct net_device *netdev = pdata->netdev;
48410 int ret;
48411
48412@@ -976,7 +976,7 @@ err_napi:
48413
48414 static void xgbe_stop(struct xgbe_prv_data *pdata)
48415 {
48416- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48417+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48418 struct xgbe_channel *channel;
48419 struct net_device *netdev = pdata->netdev;
48420 struct netdev_queue *txq;
48421@@ -1203,7 +1203,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48422 return -ERANGE;
48423 }
48424
48425- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48426+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48427
48428 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48429
48430@@ -1352,7 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48431 static int xgbe_open(struct net_device *netdev)
48432 {
48433 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48434- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48435+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48436 int ret;
48437
48438 DBGPR("-->xgbe_open\n");
48439@@ -1424,7 +1424,7 @@ err_phy_init:
48440 static int xgbe_close(struct net_device *netdev)
48441 {
48442 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48443- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48444+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48445
48446 DBGPR("-->xgbe_close\n");
48447
48448@@ -1452,8 +1452,8 @@ static int xgbe_close(struct net_device *netdev)
48449 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48450 {
48451 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48452- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48453- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48454+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48455+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48456 struct xgbe_channel *channel;
48457 struct xgbe_ring *ring;
48458 struct xgbe_packet_data *packet;
48459@@ -1521,7 +1521,7 @@ tx_netdev_return:
48460 static void xgbe_set_rx_mode(struct net_device *netdev)
48461 {
48462 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48463- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48464+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48465 unsigned int pr_mode, am_mode;
48466
48467 DBGPR("-->xgbe_set_rx_mode\n");
48468@@ -1540,7 +1540,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48469 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48470 {
48471 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48472- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48473+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48474 struct sockaddr *saddr = addr;
48475
48476 DBGPR("-->xgbe_set_mac_address\n");
48477@@ -1607,7 +1607,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48478
48479 DBGPR("-->%s\n", __func__);
48480
48481- pdata->hw_if.read_mmc_stats(pdata);
48482+ pdata->hw_if->read_mmc_stats(pdata);
48483
48484 s->rx_packets = pstats->rxframecount_gb;
48485 s->rx_bytes = pstats->rxoctetcount_gb;
48486@@ -1634,7 +1634,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48487 u16 vid)
48488 {
48489 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48490- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48491+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48492
48493 DBGPR("-->%s\n", __func__);
48494
48495@@ -1650,7 +1650,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48496 u16 vid)
48497 {
48498 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48499- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48500+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48501
48502 DBGPR("-->%s\n", __func__);
48503
48504@@ -1716,7 +1716,7 @@ static int xgbe_set_features(struct net_device *netdev,
48505 netdev_features_t features)
48506 {
48507 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48508- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48509+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48510 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
48511 int ret = 0;
48512
48513@@ -1781,8 +1781,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48514 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48515 {
48516 struct xgbe_prv_data *pdata = channel->pdata;
48517- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48518- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48519+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48520+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48521 struct xgbe_ring *ring = channel->rx_ring;
48522 struct xgbe_ring_data *rdata;
48523
48524@@ -1835,8 +1835,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
48525 static int xgbe_tx_poll(struct xgbe_channel *channel)
48526 {
48527 struct xgbe_prv_data *pdata = channel->pdata;
48528- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48529- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48530+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48531+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48532 struct xgbe_ring *ring = channel->tx_ring;
48533 struct xgbe_ring_data *rdata;
48534 struct xgbe_ring_desc *rdesc;
48535@@ -1901,7 +1901,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48536 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48537 {
48538 struct xgbe_prv_data *pdata = channel->pdata;
48539- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48540+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48541 struct xgbe_ring *ring = channel->rx_ring;
48542 struct xgbe_ring_data *rdata;
48543 struct xgbe_packet_data *packet;
48544diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48545index ebf4893..a8f51c6 100644
48546--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48547+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48548@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48549
48550 DBGPR("-->%s\n", __func__);
48551
48552- pdata->hw_if.read_mmc_stats(pdata);
48553+ pdata->hw_if->read_mmc_stats(pdata);
48554 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48555 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48556 *data++ = *(u64 *)stat;
48557@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48558 struct ethtool_coalesce *ec)
48559 {
48560 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48561- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48562+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48563 unsigned int riwt;
48564
48565 DBGPR("-->xgbe_get_coalesce\n");
48566@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48567 struct ethtool_coalesce *ec)
48568 {
48569 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48570- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48571+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48572 unsigned int rx_frames, rx_riwt, rx_usecs;
48573 unsigned int tx_frames, tx_usecs;
48574
48575@@ -536,7 +536,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
48576 const u8 *key, const u8 hfunc)
48577 {
48578 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48579- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48580+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48581 unsigned int ret;
48582
48583 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
48584diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48585index 32dd651..225cca3 100644
48586--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48587+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48588@@ -159,12 +159,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48589 DBGPR("<--xgbe_default_config\n");
48590 }
48591
48592-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48593-{
48594- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48595- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48596-}
48597-
48598 #ifdef CONFIG_ACPI
48599 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
48600 {
48601@@ -396,9 +390,8 @@ static int xgbe_probe(struct platform_device *pdev)
48602 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
48603
48604 /* Set all the function pointers */
48605- xgbe_init_all_fptrs(pdata);
48606- hw_if = &pdata->hw_if;
48607- desc_if = &pdata->desc_if;
48608+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48609+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48610
48611 /* Issue software reset to device */
48612 hw_if->exit(pdata);
48613diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48614index 59e267f..0842a88 100644
48615--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48616+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48617@@ -126,7 +126,7 @@
48618 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48619 {
48620 struct xgbe_prv_data *pdata = mii->priv;
48621- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48622+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48623 int mmd_data;
48624
48625 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48626@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48627 u16 mmd_val)
48628 {
48629 struct xgbe_prv_data *pdata = mii->priv;
48630- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48631+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48632 int mmd_data = mmd_val;
48633
48634 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48635diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48636index f326178..8bd7daf 100644
48637--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48638+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48639@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48640 tstamp_cc);
48641 u64 nsec;
48642
48643- nsec = pdata->hw_if.get_tstamp_time(pdata);
48644+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48645
48646 return nsec;
48647 }
48648@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48649
48650 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48651
48652- pdata->hw_if.update_tstamp_addend(pdata, addend);
48653+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48654
48655 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48656
48657diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48658index 13e8f95..1d8beef 100644
48659--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48660+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48661@@ -675,8 +675,8 @@ struct xgbe_prv_data {
48662 int dev_irq;
48663 unsigned int per_channel_irq;
48664
48665- struct xgbe_hw_if hw_if;
48666- struct xgbe_desc_if desc_if;
48667+ struct xgbe_hw_if *hw_if;
48668+ struct xgbe_desc_if *desc_if;
48669
48670 /* AXI DMA settings */
48671 unsigned int coherent;
48672@@ -798,6 +798,9 @@ struct xgbe_prv_data {
48673 #endif
48674 };
48675
48676+extern const struct xgbe_hw_if default_xgbe_hw_if;
48677+extern const struct xgbe_desc_if default_xgbe_desc_if;
48678+
48679 /* Function prototypes */
48680
48681 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
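
[Note] The xgbe hunks above are all one conversion: the per-device hw_if/desc_if members stop being embedded, writable structs filled in by xgbe_init_all_fptrs() and become pointers to single const tables (default_xgbe_hw_if, default_xgbe_desc_if), so every call site switches from &pdata->hw_if to pdata->hw_if and the function pointers end up in read-only memory. A minimal userspace sketch of the pattern; the names (foo_ops, foo_dev, default_foo_ops) are illustrative, not taken from the driver:

    #include <stdio.h>

    struct foo_ops {
            int (*start)(void);
            int (*stop)(void);
    };

    static int foo_start(void) { puts("start"); return 0; }
    static int foo_stop(void)  { puts("stop");  return 0; }

    /* one shared, read-only table instead of a writable per-device copy */
    static const struct foo_ops default_foo_ops = {
            .start = foo_start,
            .stop  = foo_stop,
    };

    struct foo_dev {
            const struct foo_ops *ops;      /* was: struct foo_ops ops; */
    };

    int main(void)
    {
            struct foo_dev dev = { .ops = &default_foo_ops };   /* probe-time wiring */

            dev.ops->start();
            return dev.ops->stop();
    }
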
48682diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48683index adcacda..fa6e0ae 100644
48684--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48685+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48686@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48687 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48688 {
48689 /* RX_MODE controlling object */
48690- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48691+ bnx2x_init_rx_mode_obj(bp);
48692
48693 /* multicast configuration controlling object */
48694 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48695diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48696index 07cdf9b..b08ecc7 100644
48697--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48698+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48699@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48700 return rc;
48701 }
48702
48703-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48704- struct bnx2x_rx_mode_obj *o)
48705+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48706 {
48707 if (CHIP_IS_E1x(bp)) {
48708- o->wait_comp = bnx2x_empty_rx_mode_wait;
48709- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48710+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48711+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48712 } else {
48713- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48714- o->config_rx_mode = bnx2x_set_rx_mode_e2;
48715+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48716+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48717 }
48718 }
48719
48720diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48721index 86baecb..ff3bb46 100644
48722--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48723+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48724@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48725
48726 /********************* RX MODE ****************/
48727
48728-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48729- struct bnx2x_rx_mode_obj *o);
48730+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48731
48732 /**
48733 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
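
[Note] The bnx2x_init_rx_mode_obj() change drops the second parameter that its only caller passed as &bp->rx_mode_obj and has the helper write the two callbacks into bp directly. A trimmed sketch of the resulting shape, with hypothetical names:

    struct rx_mode_obj { void (*wait_comp)(void); void (*config)(void); };
    struct bp { int is_e1x; struct rx_mode_obj rx_mode_obj; };

    static void wait_e1x(void)   { }
    static void config_e1x(void) { }
    static void wait_e2(void)    { }
    static void config_e2(void)  { }

    /* was: init_rx_mode_obj(struct bp *bp, struct rx_mode_obj *o) */
    static void init_rx_mode_obj(struct bp *bp)
    {
            if (bp->is_e1x) {
                    bp->rx_mode_obj.wait_comp = wait_e1x;
                    bp->rx_mode_obj.config    = config_e1x;
            } else {
                    bp->rx_mode_obj.wait_comp = wait_e2;
                    bp->rx_mode_obj.config    = config_e2;
            }
    }

    int main(void)
    {
            struct bp bp = { .is_e1x = 1 };

            init_rx_mode_obj(&bp);
            bp.rx_mode_obj.wait_comp();
            return 0;
    }
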
48734diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
48735index 31c9f82..e65e986 100644
48736--- a/drivers/net/ethernet/broadcom/tg3.h
48737+++ b/drivers/net/ethernet/broadcom/tg3.h
48738@@ -150,6 +150,7 @@
48739 #define CHIPREV_ID_5750_A0 0x4000
48740 #define CHIPREV_ID_5750_A1 0x4001
48741 #define CHIPREV_ID_5750_A3 0x4003
48742+#define CHIPREV_ID_5750_C1 0x4201
48743 #define CHIPREV_ID_5750_C2 0x4202
48744 #define CHIPREV_ID_5752_A0_HW 0x5000
48745 #define CHIPREV_ID_5752_A0 0x6000
48746diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
48747index 903466e..b285864 100644
48748--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
48749+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
48750@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
48751 }
48752
48753 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
48754- bna_cb_ioceth_enable,
48755- bna_cb_ioceth_disable,
48756- bna_cb_ioceth_hbfail,
48757- bna_cb_ioceth_reset
48758+ .enable_cbfn = bna_cb_ioceth_enable,
48759+ .disable_cbfn = bna_cb_ioceth_disable,
48760+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
48761+ .reset_cbfn = bna_cb_ioceth_reset
48762 };
48763
48764 static void bna_attr_init(struct bna_ioceth *ioceth)
48765diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48766index 8cffcdf..aadf043 100644
48767--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48768+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48769@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
48770 */
48771 struct l2t_skb_cb {
48772 arp_failure_handler_func arp_failure_handler;
48773-};
48774+} __no_const;
48775
48776 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
48777
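
[Note] __no_const is grsecurity's escape hatch from its constify GCC plugin, which by default turns structures consisting only of function pointers into const objects. l2t_skb_cb must stay writable because arp_failure_handler is assigned per-skb at runtime. A sketch of how such an annotation is typically kept buildable without the plugin; the macro body here is an assumption, not copied from the patch:

    /* plugin present: emit the attribute; otherwise expand to nothing */
    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    struct l2t_skb_cb_like {
            void (*arp_failure_handler)(void);
    } __no_const;                           /* opt out: written at runtime */

    static void handler(void) { }

    int main(void)
    {
            struct l2t_skb_cb_like cb;

            cb.arp_failure_handler = handler;   /* legal: not constified */
            cb.arp_failure_handler();
            return 0;
    }
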
48778diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48779index d929951..a2c23f5 100644
48780--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48781+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48782@@ -2215,7 +2215,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
48783
48784 int i;
48785 struct adapter *ap = netdev2adap(dev);
48786- static const unsigned int *reg_ranges;
48787+ const unsigned int *reg_ranges;
48788 int arr_size = 0, buf_size = 0;
48789
48790 if (is_t4(ap->params.chip)) {
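
[Note] Dropping static from reg_ranges in get_regs() removes shared mutable state: a function-local static pointer is a single variable for every adapter and every CPU, so two concurrent register dumps on a T4 and a T5 card could each observe the other's assignment. An automatic variable keeps the choice per call. Sketch:

    #include <stdio.h>

    static const unsigned int t4_ranges[] = { 0x1000, 0x2000 };
    static const unsigned int t5_ranges[] = { 0x3000, 0x4000 };

    static const unsigned int *pick_ranges(int is_t4)
    {
            const unsigned int *reg_ranges; /* was: static const unsigned int * */

            reg_ranges = is_t4 ? t4_ranges : t5_ranges;
            return reg_ranges;
    }

    int main(void)
    {
            printf("%#x\n", pick_ranges(1)[0]);
            return 0;
    }
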
48791diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48792index badff18..e15c4ec 100644
48793--- a/drivers/net/ethernet/dec/tulip/de4x5.c
48794+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48795@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48796 for (i=0; i<ETH_ALEN; i++) {
48797 tmp.addr[i] = dev->dev_addr[i];
48798 }
48799- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48800+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48801 break;
48802
48803 case DE4X5_SET_HWADDR: /* Set the hardware address */
48804@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48805 spin_lock_irqsave(&lp->lock, flags);
48806 memcpy(&statbuf, &lp->pktStats, ioc->len);
48807 spin_unlock_irqrestore(&lp->lock, flags);
48808- if (copy_to_user(ioc->data, &statbuf, ioc->len))
48809+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48810 return -EFAULT;
48811 break;
48812 }
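
[Note] Both de4x5 ioctl fixes follow one rule: never let a user-controlled ioc->len drive copy_to_user() past the end of the kernel-side buffer. A userspace sketch of the check, with memcpy() standing in for copy_to_user() and -1 for -EFAULT:

    #include <string.h>

    static int copy_out(void *to, const void *from, size_t user_len,
                        size_t buf_size)
    {
            if (user_len > buf_size)        /* reject before copying */
                    return -1;              /* the driver returns -EFAULT */
            memcpy(to, from, user_len);     /* stands in for copy_to_user() */
            return 0;
    }

    int main(void)
    {
            char addr[6] = { 0 };
            char out[64];

            /* a 64-byte request against a 6-byte source is refused */
            return copy_out(out, addr, 64, sizeof(addr)) == -1 ? 0 : 1;
    }
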
48813diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48814index e6b790f..051ba2d 100644
48815--- a/drivers/net/ethernet/emulex/benet/be_main.c
48816+++ b/drivers/net/ethernet/emulex/benet/be_main.c
48817@@ -536,7 +536,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48818
48819 if (wrapped)
48820 newacc += 65536;
48821- ACCESS_ONCE(*acc) = newacc;
48822+ ACCESS_ONCE_RW(*acc) = newacc;
48823 }
48824
48825 static void populate_erx_stats(struct be_adapter *adapter,
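
[Note] This hunk and the later i40e/ixgbe/mlx4/sfc/ath9k ones are the same mechanical change: under PaX, ACCESS_ONCE() yields a const-qualified volatile lvalue so stray writes through it fail to compile, and intentional writes are spelled ACCESS_ONCE_RW(). A sketch assuming the usual PaX-style definitions (the exact macro bodies are an assumption):

    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned int acc;

    static void store(unsigned int newacc)
    {
            ACCESS_ONCE_RW(acc) = newacc;   /* writes need the _RW form */
    }

    static unsigned int load(void)
    {
            return ACCESS_ONCE(acc);        /* reads keep the plain form */
    }

    int main(void)
    {
            store(42);
            return load() == 42 ? 0 : 1;
    }
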
48826diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48827index 6d0c5d5..55be363 100644
48828--- a/drivers/net/ethernet/faraday/ftgmac100.c
48829+++ b/drivers/net/ethernet/faraday/ftgmac100.c
48830@@ -30,6 +30,8 @@
48831 #include <linux/netdevice.h>
48832 #include <linux/phy.h>
48833 #include <linux/platform_device.h>
48834+#include <linux/interrupt.h>
48835+#include <linux/irqreturn.h>
48836 #include <net/ip.h>
48837
48838 #include "ftgmac100.h"
48839diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
48840index dce5f7b..2433466 100644
48841--- a/drivers/net/ethernet/faraday/ftmac100.c
48842+++ b/drivers/net/ethernet/faraday/ftmac100.c
48843@@ -31,6 +31,8 @@
48844 #include <linux/module.h>
48845 #include <linux/netdevice.h>
48846 #include <linux/platform_device.h>
48847+#include <linux/interrupt.h>
48848+#include <linux/irqreturn.h>
48849
48850 #include "ftmac100.h"
48851
48852diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48853index fabcfa1..188fd22 100644
48854--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48855+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48856@@ -419,7 +419,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48857 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48858
48859 /* Update the base adjustment value. */
48860- ACCESS_ONCE(pf->ptp_base_adj) = incval;
48861+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
48862 smp_mb(); /* Force the above update. */
48863 }
48864
48865diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48866index 79c00f5..8da39f6 100644
48867--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48868+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48869@@ -785,7 +785,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48870 }
48871
48872 /* update the base incval used to calculate frequency adjustment */
48873- ACCESS_ONCE(adapter->base_incval) = incval;
48874+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48875 smp_mb();
48876
48877 /* need lock to prevent incorrect read while modifying cyclecounter */
48878diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48879index 8c234ec..757331f 100644
48880--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48881+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48882@@ -468,8 +468,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
48883 wmb();
48884
48885 /* we want to dirty this cache line once */
48886- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
48887- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
48888+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
48889+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
48890
48891 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
48892
48893diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48894index 6223930..975033d 100644
48895--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48896+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48897@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48898 struct __vxge_hw_fifo *fifo;
48899 struct vxge_hw_fifo_config *config;
48900 u32 txdl_size, txdl_per_memblock;
48901- struct vxge_hw_mempool_cbs fifo_mp_callback;
48902+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48903+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48904+ };
48905+
48906 struct __vxge_hw_virtualpath *vpath;
48907
48908 if ((vp == NULL) || (attr == NULL)) {
48909@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48910 goto exit;
48911 }
48912
48913- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48914-
48915 fifo->mempool =
48916 __vxge_hw_mempool_create(vpath->hldev,
48917 fifo->config->memblock_size,
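
[Note] The vxge change turns an on-stack fifo_mp_callback, whose single member used to be filled in just before use, into a function-local static table with a designated initializer, so it is built once at compile time and never carries uninitialized stack fields. Sketch with illustrative names:

    struct mempool_cbs {
            void (*item_func_alloc)(void);
    };

    static void item_alloc_impl(void) { }

    static void fifo_create(void)
    {
            /* compile-time table; was a per-call stack object */
            static struct mempool_cbs fifo_mp_callback = {
                    .item_func_alloc = item_alloc_impl,
            };

            fifo_mp_callback.item_func_alloc(); /* handed to the core in the driver */
    }

    int main(void)
    {
            fifo_create();
            return 0;
    }
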
48918diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48919index 2bb48d5..d1a865d 100644
48920--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48921+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48922@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48923 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48924 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48925 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48926- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48927+ pax_open_kernel();
48928+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48929+ pax_close_kernel();
48930 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48931 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48932 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48933diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48934index be7d7a6..a8983f8 100644
48935--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48936+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48937@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48938 case QLCNIC_NON_PRIV_FUNC:
48939 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48940 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48941- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48942+ pax_open_kernel();
48943+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48944+ pax_close_kernel();
48945 break;
48946 case QLCNIC_PRIV_FUNC:
48947 ahw->op_mode = QLCNIC_PRIV_FUNC;
48948 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48949- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48950+ pax_open_kernel();
48951+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48952+ pax_close_kernel();
48953 break;
48954 case QLCNIC_MGMT_FUNC:
48955 ahw->op_mode = QLCNIC_MGMT_FUNC;
48956 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48957- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48958+ pax_open_kernel();
48959+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48960+ pax_close_kernel();
48961 break;
48962 default:
48963 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
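
[Note] The qlcnic hunks show the write side of constification: once nic_ops lives in read-only memory, the one legitimate runtime assignment is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection, and the *(void **)& cast discards the const qualifier for that single store. A userspace analogy using mprotect() in place of the pax helpers; the mapping is illustrative only:

    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct nic_ops { void (*init_driver)(void); };

    static void init_default(void) { }

    int main(void)
    {
            long pg = sysconf(_SC_PAGESIZE);
            struct nic_ops *ops = aligned_alloc(pg, pg);

            if (!ops)
                    return 1;
            ops->init_driver = init_default;
            mprotect(ops, pg, PROT_READ);               /* "constified" */

            mprotect(ops, pg, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
            ops->init_driver = init_default;            /* the one sanctioned store */
            mprotect(ops, pg, PROT_READ);               /* pax_close_kernel() */

            ops->init_driver();
            return 0;
    }
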
48964diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48965index 332bb8a..e6adcd1 100644
48966--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48967+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48968@@ -1285,7 +1285,7 @@ flash_temp:
48969 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48970 {
48971 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48972- static const struct qlcnic_dump_operations *fw_dump_ops;
48973+ const struct qlcnic_dump_operations *fw_dump_ops;
48974 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48975 u32 entry_offset, dump, no_entries, buf_offset = 0;
48976 int i, k, ops_cnt, ops_index, dump_size = 0;
48977diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48978index c70ab40..00b28e0 100644
48979--- a/drivers/net/ethernet/realtek/r8169.c
48980+++ b/drivers/net/ethernet/realtek/r8169.c
48981@@ -788,22 +788,22 @@ struct rtl8169_private {
48982 struct mdio_ops {
48983 void (*write)(struct rtl8169_private *, int, int);
48984 int (*read)(struct rtl8169_private *, int);
48985- } mdio_ops;
48986+ } __no_const mdio_ops;
48987
48988 struct pll_power_ops {
48989 void (*down)(struct rtl8169_private *);
48990 void (*up)(struct rtl8169_private *);
48991- } pll_power_ops;
48992+ } __no_const pll_power_ops;
48993
48994 struct jumbo_ops {
48995 void (*enable)(struct rtl8169_private *);
48996 void (*disable)(struct rtl8169_private *);
48997- } jumbo_ops;
48998+ } __no_const jumbo_ops;
48999
49000 struct csi_ops {
49001 void (*write)(struct rtl8169_private *, int, int);
49002 u32 (*read)(struct rtl8169_private *, int);
49003- } csi_ops;
49004+ } __no_const csi_ops;
49005
49006 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49007 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49008diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49009index 6b861e3..204ac86 100644
49010--- a/drivers/net/ethernet/sfc/ptp.c
49011+++ b/drivers/net/ethernet/sfc/ptp.c
49012@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49013 ptp->start.dma_addr);
49014
49015 /* Clear flag that signals MC ready */
49016- ACCESS_ONCE(*start) = 0;
49017+ ACCESS_ONCE_RW(*start) = 0;
49018 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49019 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49020 EFX_BUG_ON_PARANOID(rc);
49021diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
49022index 10b6173..b605dfd5 100644
49023--- a/drivers/net/ethernet/sfc/selftest.c
49024+++ b/drivers/net/ethernet/sfc/selftest.c
49025@@ -46,7 +46,7 @@ struct efx_loopback_payload {
49026 struct iphdr ip;
49027 struct udphdr udp;
49028 __be16 iteration;
49029- const char msg[64];
49030+ char msg[64];
49031 } __packed;
49032
49033 /* Loopback test source MAC address */
49034diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49035index 08c483b..2c4a553 100644
49036--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49037+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49038@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49039
49040 writel(value, ioaddr + MMC_CNTRL);
49041
49042- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49043- MMC_CNTRL, value);
49044+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49045+// MMC_CNTRL, value);
49046 }
49047
49048 /* To mask all interrupts. */
49049diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
49050index 17e2766..c332f1e 100644
49051--- a/drivers/net/ethernet/via/via-rhine.c
49052+++ b/drivers/net/ethernet/via/via-rhine.c
49053@@ -2514,7 +2514,7 @@ static struct platform_driver rhine_driver_platform = {
49054 }
49055 };
49056
49057-static struct dmi_system_id rhine_dmi_table[] __initdata = {
49058+static const struct dmi_system_id rhine_dmi_table[] __initconst = {
49059 {
49060 .ident = "EPIA-M",
49061 .matches = {
49062diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49063index 384ca4f..dd7d4f9 100644
49064--- a/drivers/net/hyperv/hyperv_net.h
49065+++ b/drivers/net/hyperv/hyperv_net.h
49066@@ -171,7 +171,7 @@ struct rndis_device {
49067 enum rndis_device_state state;
49068 bool link_state;
49069 bool link_change;
49070- atomic_t new_req_id;
49071+ atomic_unchecked_t new_req_id;
49072
49073 spinlock_t request_lock;
49074 struct list_head req_list;
49075diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49076index 7816d98..7890614 100644
49077--- a/drivers/net/hyperv/rndis_filter.c
49078+++ b/drivers/net/hyperv/rndis_filter.c
49079@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49080 * template
49081 */
49082 set = &rndis_msg->msg.set_req;
49083- set->req_id = atomic_inc_return(&dev->new_req_id);
49084+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49085
49086 /* Add to the request list */
49087 spin_lock_irqsave(&dev->request_lock, flags);
49088@@ -918,7 +918,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49089
49090 /* Setup the rndis set */
49091 halt = &request->request_msg.msg.halt_req;
49092- halt->req_id = atomic_inc_return(&dev->new_req_id);
49093+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49094
49095 /* Ignore return since this msg is optional. */
49096 rndis_filter_send_request(dev, request);
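
[Note] new_req_id becomes atomic_unchecked_t because, under PaX's refcount protection, plain atomic_t increments are instrumented to trap on overflow; a request ID is free to wrap, so it moves to the unchecked variant and is bumped with atomic_inc_return_unchecked(). A sketch of the type split, assuming the PaX layout (same representation, no overflow instrumentation):

    #include <stdio.h>

    typedef struct { int counter; } atomic_unchecked_t;

    static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
            return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
    }

    static atomic_unchecked_t new_req_id;

    int main(void)
    {
            /* wrapping is acceptable for an ID; a trap is not wanted */
            printf("%d %d\n",
                   atomic_inc_return_unchecked(&new_req_id),
                   atomic_inc_return_unchecked(&new_req_id));
            return 0;
    }
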
49097diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
49098index 34f846b..4a0d5b1 100644
49099--- a/drivers/net/ifb.c
49100+++ b/drivers/net/ifb.c
49101@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
49102 return 0;
49103 }
49104
49105-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
49106+static struct rtnl_link_ops ifb_link_ops = {
49107 .kind = "ifb",
49108 .priv_size = sizeof(struct ifb_private),
49109 .setup = ifb_setup,
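
[Note] __read_mostly disappears from ifb_link_ops here, and from the macvlan/macvtap/nlmon/team/tun/vxlan ops and notifier blocks below, plausibly for one reason: the constify plugin wants these structures in read-only memory, and a section attribute pinning them into the writable .data..read_mostly section would conflict with that. A sketch of the tension, using the kernel's usual definition of the macro:

    #define __read_mostly __attribute__((section(".data..read_mostly")))

    struct link_ops { int (*setup)(void); };

    static int setup_impl(void) { return 0; }

    /* writable section: the object can never be made read-only */
    static struct link_ops writable_ops __read_mostly = { .setup = setup_impl };

    /* no section override: const data lands in .rodata */
    static const struct link_ops ro_ops = { .setup = setup_impl };

    int main(void)
    {
            return writable_ops.setup() + ro_ops.setup();
    }
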
49110diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49111index 1df38bd..4bc20b0 100644
49112--- a/drivers/net/macvlan.c
49113+++ b/drivers/net/macvlan.c
49114@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49115 free_nskb:
49116 kfree_skb(nskb);
49117 err:
49118- atomic_long_inc(&skb->dev->rx_dropped);
49119+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49120 }
49121
49122 static void macvlan_flush_sources(struct macvlan_port *port,
49123@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49124 int macvlan_link_register(struct rtnl_link_ops *ops)
49125 {
49126 /* common fields */
49127- ops->priv_size = sizeof(struct macvlan_dev);
49128- ops->validate = macvlan_validate;
49129- ops->maxtype = IFLA_MACVLAN_MAX;
49130- ops->policy = macvlan_policy;
49131- ops->changelink = macvlan_changelink;
49132- ops->get_size = macvlan_get_size;
49133- ops->fill_info = macvlan_fill_info;
49134+ pax_open_kernel();
49135+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49136+ *(void **)&ops->validate = macvlan_validate;
49137+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49138+ *(const void **)&ops->policy = macvlan_policy;
49139+ *(void **)&ops->changelink = macvlan_changelink;
49140+ *(void **)&ops->get_size = macvlan_get_size;
49141+ *(void **)&ops->fill_info = macvlan_fill_info;
49142+ pax_close_kernel();
49143
49144 return rtnl_link_register(ops);
49145 };
49146@@ -1551,7 +1553,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49147 return NOTIFY_DONE;
49148 }
49149
49150-static struct notifier_block macvlan_notifier_block __read_mostly = {
49151+static struct notifier_block macvlan_notifier_block = {
49152 .notifier_call = macvlan_device_event,
49153 };
49154
49155diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49156index 27ecc5c..f636328 100644
49157--- a/drivers/net/macvtap.c
49158+++ b/drivers/net/macvtap.c
49159@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
49160 dev->tx_queue_len = TUN_READQ_SIZE;
49161 }
49162
49163-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
49164+static struct rtnl_link_ops macvtap_link_ops = {
49165 .kind = "macvtap",
49166 .setup = macvtap_setup,
49167 .newlink = macvtap_newlink,
49168@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49169
49170 ret = 0;
49171 u = q->flags;
49172- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49173+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49174 put_user(u, &ifr->ifr_flags))
49175 ret = -EFAULT;
49176 macvtap_put_vlan(vlan);
49177@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49178 return NOTIFY_DONE;
49179 }
49180
49181-static struct notifier_block macvtap_notifier_block __read_mostly = {
49182+static struct notifier_block macvtap_notifier_block = {
49183 .notifier_call = macvtap_device_event,
49184 };
49185
49186diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
49187index 34924df..a747360 100644
49188--- a/drivers/net/nlmon.c
49189+++ b/drivers/net/nlmon.c
49190@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
49191 return 0;
49192 }
49193
49194-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
49195+static struct rtnl_link_ops nlmon_link_ops = {
49196 .kind = "nlmon",
49197 .priv_size = sizeof(struct nlmon),
49198 .setup = nlmon_setup,
49199diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
49200index bdfe51f..e7845c7 100644
49201--- a/drivers/net/phy/phy_device.c
49202+++ b/drivers/net/phy/phy_device.c
49203@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
49204 * zero on success.
49205 *
49206 */
49207-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49208+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
49209 struct phy_c45_device_ids *c45_ids) {
49210 int phy_reg;
49211 int i, reg_addr;
49212@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49213 * its return value is in turn returned.
49214 *
49215 */
49216-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49217+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
49218 bool is_c45, struct phy_c45_device_ids *c45_ids)
49219 {
49220 int phy_reg;
49221@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49222 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
49223 {
49224 struct phy_c45_device_ids c45_ids = {0};
49225- u32 phy_id = 0;
49226+ int phy_id = 0;
49227 int r;
49228
49229 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
49230diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49231index 9d15566..5ad4ef6 100644
49232--- a/drivers/net/ppp/ppp_generic.c
49233+++ b/drivers/net/ppp/ppp_generic.c
49234@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49235 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
49236 struct ppp_stats stats;
49237 struct ppp_comp_stats cstats;
49238- char *vers;
49239
49240 switch (cmd) {
49241 case SIOCGPPPSTATS:
49242@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49243 break;
49244
49245 case SIOCGPPPVER:
49246- vers = PPP_VERSION;
49247- if (copy_to_user(addr, vers, strlen(vers) + 1))
49248+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
49249 break;
49250 err = 0;
49251 break;
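
[Note] The SIOCGPPPVER fix drops the vers temporary and the runtime strlen(): PPP_VERSION is a string literal, so sizeof(PPP_VERSION) is a compile-time constant that already counts the terminating NUL. Tiny demonstration:

    #include <stdio.h>
    #include <string.h>

    #define PPP_VERSION "2.4.2"

    int main(void)
    {
            /* sizeof includes the NUL, strlen does not: prints "6 5" */
            printf("%zu %zu\n", sizeof(PPP_VERSION), strlen(PPP_VERSION));
            return 0;
    }
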
49252diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49253index 079f7ad..b2a2bfa7 100644
49254--- a/drivers/net/slip/slhc.c
49255+++ b/drivers/net/slip/slhc.c
49256@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49257 register struct tcphdr *thp;
49258 register struct iphdr *ip;
49259 register struct cstate *cs;
49260- int len, hdrlen;
49261+ long len, hdrlen;
49262 unsigned char *cp = icp;
49263
49264 /* We've got a compressed packet; read the change byte */
49265diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49266index 7d39484..d58499d 100644
49267--- a/drivers/net/team/team.c
49268+++ b/drivers/net/team/team.c
49269@@ -2099,7 +2099,7 @@ static unsigned int team_get_num_rx_queues(void)
49270 return TEAM_DEFAULT_NUM_RX_QUEUES;
49271 }
49272
49273-static struct rtnl_link_ops team_link_ops __read_mostly = {
49274+static struct rtnl_link_ops team_link_ops = {
49275 .kind = DRV_NAME,
49276 .priv_size = sizeof(struct team),
49277 .setup = team_setup,
49278@@ -2889,7 +2889,7 @@ static int team_device_event(struct notifier_block *unused,
49279 return NOTIFY_DONE;
49280 }
49281
49282-static struct notifier_block team_notifier_block __read_mostly = {
49283+static struct notifier_block team_notifier_block = {
49284 .notifier_call = team_device_event,
49285 };
49286
49287diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49288index 857dca4..642f532 100644
49289--- a/drivers/net/tun.c
49290+++ b/drivers/net/tun.c
49291@@ -1421,7 +1421,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
49292 return -EINVAL;
49293 }
49294
49295-static struct rtnl_link_ops tun_link_ops __read_mostly = {
49296+static struct rtnl_link_ops tun_link_ops = {
49297 .kind = DRV_NAME,
49298 .priv_size = sizeof(struct tun_struct),
49299 .setup = tun_setup,
49300@@ -1830,7 +1830,7 @@ unlock:
49301 }
49302
49303 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49304- unsigned long arg, int ifreq_len)
49305+ unsigned long arg, size_t ifreq_len)
49306 {
49307 struct tun_file *tfile = file->private_data;
49308 struct tun_struct *tun;
49309@@ -1844,6 +1844,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49310 int le;
49311 int ret;
49312
49313+ if (ifreq_len > sizeof ifr)
49314+ return -EFAULT;
49315+
49316 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49317 if (copy_from_user(&ifr, argp, ifreq_len))
49318 return -EFAULT;
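
[Note] The tun change widens ifreq_len to size_t and rejects any length larger than the on-stack struct ifreq before copy_from_user() runs, instead of trusting every caller to pass a sane value. Sketch, with memcpy() standing in for copy_from_user():

    #include <string.h>

    struct ifreq_like { char name[16]; short flags; };

    static long chr_ioctl_like(const void *argp, size_t ifreq_len)
    {
            struct ifreq_like ifr;

            if (ifreq_len > sizeof(ifr))    /* new guard */
                    return -14;             /* -EFAULT */
            memcpy(&ifr, argp, ifreq_len);  /* copy_from_user() stand-in */
            return 0;
    }

    int main(void)
    {
            struct ifreq_like src = { "tun0", 0 };

            return chr_ioctl_like(&src, sizeof(src)) ? 1 : 0;
    }
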
49319diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49320index 778e915..58c4d95 100644
49321--- a/drivers/net/usb/hso.c
49322+++ b/drivers/net/usb/hso.c
49323@@ -70,7 +70,7 @@
49324 #include <asm/byteorder.h>
49325 #include <linux/serial_core.h>
49326 #include <linux/serial.h>
49327-
49328+#include <asm/local.h>
49329
49330 #define MOD_AUTHOR "Option Wireless"
49331 #define MOD_DESCRIPTION "USB High Speed Option driver"
49332@@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49333 struct urb *urb;
49334
49335 urb = serial->rx_urb[0];
49336- if (serial->port.count > 0) {
49337+ if (atomic_read(&serial->port.count) > 0) {
49338 count = put_rxbuf_data(urb, serial);
49339 if (count == -1)
49340 return;
49341@@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49342 DUMP1(urb->transfer_buffer, urb->actual_length);
49343
49344 /* Anyone listening? */
49345- if (serial->port.count == 0)
49346+ if (atomic_read(&serial->port.count) == 0)
49347 return;
49348
49349 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49350@@ -1282,8 +1282,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49351 tty_port_tty_set(&serial->port, tty);
49352
49353 /* check for port already opened, if not set the termios */
49354- serial->port.count++;
49355- if (serial->port.count == 1) {
49356+ if (atomic_inc_return(&serial->port.count) == 1) {
49357 serial->rx_state = RX_IDLE;
49358 /* Force default termio settings */
49359 _hso_serial_set_termios(tty, NULL);
49360@@ -1293,7 +1292,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49361 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49362 if (result) {
49363 hso_stop_serial_device(serial->parent);
49364- serial->port.count--;
49365+ atomic_dec(&serial->port.count);
49366 } else {
49367 kref_get(&serial->parent->ref);
49368 }
49369@@ -1331,10 +1330,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49370
49371 /* reset the rts and dtr */
49372 /* do the actual close */
49373- serial->port.count--;
49374+ atomic_dec(&serial->port.count);
49375
49376- if (serial->port.count <= 0) {
49377- serial->port.count = 0;
49378+ if (atomic_read(&serial->port.count) <= 0) {
49379+ atomic_set(&serial->port.count, 0);
49380 tty_port_tty_set(&serial->port, NULL);
49381 if (!usb_gone)
49382 hso_stop_serial_device(serial->parent);
49383@@ -1417,7 +1416,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49384
49385 /* the actual setup */
49386 spin_lock_irqsave(&serial->serial_lock, flags);
49387- if (serial->port.count)
49388+ if (atomic_read(&serial->port.count))
49389 _hso_serial_set_termios(tty, old);
49390 else
49391 tty->termios = *old;
49392@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
49393 D1("Pending read interrupt on port %d\n", i);
49394 spin_lock(&serial->serial_lock);
49395 if (serial->rx_state == RX_IDLE &&
49396- serial->port.count > 0) {
49397+ atomic_read(&serial->port.count) > 0) {
49398 /* Setup and send a ctrl req read on
49399 * port i */
49400 if (!serial->rx_urb_filled[0]) {
49401@@ -3053,7 +3052,7 @@ static int hso_resume(struct usb_interface *iface)
49402 /* Start all serial ports */
49403 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49404 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49405- if (dev2ser(serial_table[i])->port.count) {
49406+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49407 result =
49408 hso_start_serial_device(serial_table[i], GFP_NOIO);
49409 hso_kick_transmit(dev2ser(serial_table[i]));
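
[Note] Across the hso hunks, the tty port open count moves from a plain int to an atomic counter because it is touched both from task-context open/close and from URB completion paths; atomic_inc_return() makes the increment and the first-open test one indivisible step. Sketch with C11 atomics in place of the kernel API:

    #include <stdatomic.h>

    static atomic_int port_count;

    static int serial_open(void)
    {
            /* was: port.count++; if (port.count == 1) ... (two racy steps) */
            if (atomic_fetch_add(&port_count, 1) + 1 == 1)
                    return 1;       /* first opener: run one-time setup */
            return 0;
    }

    static void serial_close(void)
    {
            if (atomic_fetch_sub(&port_count, 1) - 1 <= 0)
                    atomic_store(&port_count, 0);   /* clamp before teardown */
    }

    int main(void)
    {
            int first = serial_open();

            serial_close();
            return first ? 0 : 1;
    }
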
49410diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49411index 9f7c0ab..1577b4a 100644
49412--- a/drivers/net/usb/r8152.c
49413+++ b/drivers/net/usb/r8152.c
49414@@ -601,7 +601,7 @@ struct r8152 {
49415 void (*unload)(struct r8152 *);
49416 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
49417 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
49418- } rtl_ops;
49419+ } __no_const rtl_ops;
49420
49421 int intr_interval;
49422 u32 saved_wolopts;
49423diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49424index a2515887..6d13233 100644
49425--- a/drivers/net/usb/sierra_net.c
49426+++ b/drivers/net/usb/sierra_net.c
49427@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49428 /* atomic counter partially included in MAC address to make sure 2 devices
49429 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49430 */
49431-static atomic_t iface_counter = ATOMIC_INIT(0);
49432+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49433
49434 /*
49435 * SYNC Timer Delay definition used to set the expiry time
49436@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49437 dev->net->netdev_ops = &sierra_net_device_ops;
49438
49439 /* change MAC addr to include, ifacenum, and to be unique */
49440- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49441+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49442 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49443
49444 /* we will have to manufacture ethernet headers, prepare template */
49445diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
49446index 777757a..395a767 100644
49447--- a/drivers/net/usb/usbnet.c
49448+++ b/drivers/net/usb/usbnet.c
49449@@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
49450 struct net_device *net)
49451 {
49452 struct usbnet *dev = netdev_priv(net);
49453- int length;
49454+ unsigned int length;
49455 struct urb *urb = NULL;
49456 struct skb_data *entry;
49457 struct driver_info *info = dev->driver_info;
49458@@ -1413,7 +1413,7 @@ not_drop:
49459 }
49460 } else
49461 netif_dbg(dev, tx_queued, dev->net,
49462- "> tx, len %d, type 0x%x\n", length, skb->protocol);
49463+ "> tx, len %u, type 0x%x\n", length, skb->protocol);
49464 #ifdef CONFIG_PM
49465 deferred:
49466 #endif
49467diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49468index 59b0e97..a6ed579 100644
49469--- a/drivers/net/virtio_net.c
49470+++ b/drivers/net/virtio_net.c
49471@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
49472 #define RECEIVE_AVG_WEIGHT 64
49473
49474 /* Minimum alignment for mergeable packet buffers. */
49475-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49476+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49477
49478 #define VIRTNET_DRIVER_VERSION "1.0.0"
49479
49480diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49481index fceb637..37c70fd 100644
49482--- a/drivers/net/vxlan.c
49483+++ b/drivers/net/vxlan.c
49484@@ -2935,7 +2935,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
49485 return vxlan->net;
49486 }
49487
49488-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49489+static struct rtnl_link_ops vxlan_link_ops = {
49490 .kind = "vxlan",
49491 .maxtype = IFLA_VXLAN_MAX,
49492 .policy = vxlan_policy,
49493@@ -2983,7 +2983,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49494 return NOTIFY_DONE;
49495 }
49496
49497-static struct notifier_block vxlan_notifier_block __read_mostly = {
49498+static struct notifier_block vxlan_notifier_block = {
49499 .notifier_call = vxlan_lowerdev_event,
49500 };
49501
49502diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49503index 5920c99..ff2e4a5 100644
49504--- a/drivers/net/wan/lmc/lmc_media.c
49505+++ b/drivers/net/wan/lmc/lmc_media.c
49506@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49507 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49508
49509 lmc_media_t lmc_ds3_media = {
49510- lmc_ds3_init, /* special media init stuff */
49511- lmc_ds3_default, /* reset to default state */
49512- lmc_ds3_set_status, /* reset status to state provided */
49513- lmc_dummy_set_1, /* set clock source */
49514- lmc_dummy_set2_1, /* set line speed */
49515- lmc_ds3_set_100ft, /* set cable length */
49516- lmc_ds3_set_scram, /* set scrambler */
49517- lmc_ds3_get_link_status, /* get link status */
49518- lmc_dummy_set_1, /* set link status */
49519- lmc_ds3_set_crc_length, /* set CRC length */
49520- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49521- lmc_ds3_watchdog
49522+ .init = lmc_ds3_init, /* special media init stuff */
49523+ .defaults = lmc_ds3_default, /* reset to default state */
49524+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49525+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49526+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49527+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49528+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49529+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49530+ .set_link_status = lmc_dummy_set_1, /* set link status */
49531+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49532+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49533+ .watchdog = lmc_ds3_watchdog
49534 };
49535
49536 lmc_media_t lmc_hssi_media = {
49537- lmc_hssi_init, /* special media init stuff */
49538- lmc_hssi_default, /* reset to default state */
49539- lmc_hssi_set_status, /* reset status to state provided */
49540- lmc_hssi_set_clock, /* set clock source */
49541- lmc_dummy_set2_1, /* set line speed */
49542- lmc_dummy_set_1, /* set cable length */
49543- lmc_dummy_set_1, /* set scrambler */
49544- lmc_hssi_get_link_status, /* get link status */
49545- lmc_hssi_set_link_status, /* set link status */
49546- lmc_hssi_set_crc_length, /* set CRC length */
49547- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49548- lmc_hssi_watchdog
49549+ .init = lmc_hssi_init, /* special media init stuff */
49550+ .defaults = lmc_hssi_default, /* reset to default state */
49551+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49552+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49553+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49554+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49555+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49556+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49557+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49558+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49559+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49560+ .watchdog = lmc_hssi_watchdog
49561 };
49562
49563-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49564- lmc_ssi_default, /* reset to default state */
49565- lmc_ssi_set_status, /* reset status to state provided */
49566- lmc_ssi_set_clock, /* set clock source */
49567- lmc_ssi_set_speed, /* set line speed */
49568- lmc_dummy_set_1, /* set cable length */
49569- lmc_dummy_set_1, /* set scrambler */
49570- lmc_ssi_get_link_status, /* get link status */
49571- lmc_ssi_set_link_status, /* set link status */
49572- lmc_ssi_set_crc_length, /* set CRC length */
49573- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49574- lmc_ssi_watchdog
49575+lmc_media_t lmc_ssi_media = {
49576+ .init = lmc_ssi_init, /* special media init stuff */
49577+ .defaults = lmc_ssi_default, /* reset to default state */
49578+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49579+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49580+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49581+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49582+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49583+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49584+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49585+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49586+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49587+ .watchdog = lmc_ssi_watchdog
49588 };
49589
49590 lmc_media_t lmc_t1_media = {
49591- lmc_t1_init, /* special media init stuff */
49592- lmc_t1_default, /* reset to default state */
49593- lmc_t1_set_status, /* reset status to state provided */
49594- lmc_t1_set_clock, /* set clock source */
49595- lmc_dummy_set2_1, /* set line speed */
49596- lmc_dummy_set_1, /* set cable length */
49597- lmc_dummy_set_1, /* set scrambler */
49598- lmc_t1_get_link_status, /* get link status */
49599- lmc_dummy_set_1, /* set link status */
49600- lmc_t1_set_crc_length, /* set CRC length */
49601- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49602- lmc_t1_watchdog
49603+ .init = lmc_t1_init, /* special media init stuff */
49604+ .defaults = lmc_t1_default, /* reset to default state */
49605+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49606+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49607+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49608+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49609+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49610+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49611+ .set_link_status = lmc_dummy_set_1, /* set link status */
49612+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49613+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49614+ .watchdog = lmc_t1_watchdog
49615 };
49616
49617 static void
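
[Note] All four lmc_media_t tables (like the bna and z8530 tables elsewhere in this patch) switch from positional to designated initializers. Positional initialization of an ops struct silently mis-binds every pointer if a member is ever added or reordered; designated initializers bind by name and zero anything left unnamed. Compact illustration:

    struct media_ops {
            void (*init)(void);
            void (*set_speed)(void);    /* inserting a member here ... */
            void (*watchdog)(void);
    };

    static void t1_init(void)     { }
    static void t1_watchdog(void) { }

    /* ... breaks a positional { t1_init, t1_watchdog } pairing,
     * but leaves this form correct, with .set_speed zeroed: */
    static struct media_ops lmc_t1_like = {
            .init     = t1_init,
            .watchdog = t1_watchdog,
    };

    int main(void)
    {
            lmc_t1_like.init();
            lmc_t1_like.watchdog();
            return lmc_t1_like.set_speed == 0 ? 0 : 1;
    }
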
49618diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49619index feacc3b..5bac0de 100644
49620--- a/drivers/net/wan/z85230.c
49621+++ b/drivers/net/wan/z85230.c
49622@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49623
49624 struct z8530_irqhandler z8530_sync =
49625 {
49626- z8530_rx,
49627- z8530_tx,
49628- z8530_status
49629+ .rx = z8530_rx,
49630+ .tx = z8530_tx,
49631+ .status = z8530_status
49632 };
49633
49634 EXPORT_SYMBOL(z8530_sync);
49635@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49636 }
49637
49638 static struct z8530_irqhandler z8530_dma_sync = {
49639- z8530_dma_rx,
49640- z8530_dma_tx,
49641- z8530_dma_status
49642+ .rx = z8530_dma_rx,
49643+ .tx = z8530_dma_tx,
49644+ .status = z8530_dma_status
49645 };
49646
49647 static struct z8530_irqhandler z8530_txdma_sync = {
49648- z8530_rx,
49649- z8530_dma_tx,
49650- z8530_dma_status
49651+ .rx = z8530_rx,
49652+ .tx = z8530_dma_tx,
49653+ .status = z8530_dma_status
49654 };
49655
49656 /**
49657@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49658
49659 struct z8530_irqhandler z8530_nop=
49660 {
49661- z8530_rx_clear,
49662- z8530_tx_clear,
49663- z8530_status_clear
49664+ .rx = z8530_rx_clear,
49665+ .tx = z8530_tx_clear,
49666+ .status = z8530_status_clear
49667 };
49668
49669
49670diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49671index 0b60295..b8bfa5b 100644
49672--- a/drivers/net/wimax/i2400m/rx.c
49673+++ b/drivers/net/wimax/i2400m/rx.c
49674@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49675 if (i2400m->rx_roq == NULL)
49676 goto error_roq_alloc;
49677
49678- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49679+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49680 GFP_KERNEL);
49681 if (rd == NULL) {
49682 result = -ENOMEM;
49683diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49684index e71a2ce..2268d61 100644
49685--- a/drivers/net/wireless/airo.c
49686+++ b/drivers/net/wireless/airo.c
49687@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49688 struct airo_info *ai = dev->ml_priv;
49689 int ridcode;
49690 int enabled;
49691- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49692+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49693 unsigned char *iobuf;
49694
49695 /* Only super-user can write RIDs */
49696diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49697index da92bfa..5a9001a 100644
49698--- a/drivers/net/wireless/at76c50x-usb.c
49699+++ b/drivers/net/wireless/at76c50x-usb.c
49700@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49701 }
49702
49703 /* Convert timeout from the DFU status to jiffies */
49704-static inline unsigned long at76_get_timeout(struct dfu_status *s)
49705+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
49706 {
49707 return msecs_to_jiffies((s->poll_timeout[2] << 16)
49708 | (s->poll_timeout[1] << 8)
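
[Note] __intentional_overflow(-1) is an annotation for grsecurity's size_overflow GCC plugin, which instruments integer arithmetic and traps on unexpected overflow; tagging at76_get_timeout() tells the plugin the byte-assembly here may legitimately wrap and should be left alone. A sketch of how the annotation is usually kept buildable without the plugin; the macro body is an assumption:

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    static inline unsigned long __intentional_overflow(-1)
    assemble_timeout(const unsigned char poll_timeout[3])
    {
            return ((unsigned long)poll_timeout[2] << 16)
                 | ((unsigned long)poll_timeout[1] << 8)
                 |  poll_timeout[0];
    }

    int main(void)
    {
            unsigned char t[3] = { 0x10, 0x20, 0x03 };

            return assemble_timeout(t) == 0x032010UL ? 0 : 1;
    }
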
49709diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
49710index 2fd9e18..3f55bdd 100644
49711--- a/drivers/net/wireless/ath/ath10k/htc.c
49712+++ b/drivers/net/wireless/ath/ath10k/htc.c
49713@@ -849,7 +849,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
49714 /* registered target arrival callback from the HIF layer */
49715 int ath10k_htc_init(struct ath10k *ar)
49716 {
49717- struct ath10k_hif_cb htc_callbacks;
49718+ static struct ath10k_hif_cb htc_callbacks = {
49719+ .rx_completion = ath10k_htc_rx_completion_handler,
49720+ .tx_completion = ath10k_htc_tx_completion_handler,
49721+ };
49722 struct ath10k_htc_ep *ep = NULL;
49723 struct ath10k_htc *htc = &ar->htc;
49724
49725@@ -858,8 +861,6 @@ int ath10k_htc_init(struct ath10k *ar)
49726 ath10k_htc_reset_endpoint_states(htc);
49727
49728 /* setup HIF layer callbacks */
49729- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
49730- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
49731 htc->ar = ar;
49732
49733 /* Get HIF default pipe for HTC message exchange */
49734diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
49735index 527179c..a890150 100644
49736--- a/drivers/net/wireless/ath/ath10k/htc.h
49737+++ b/drivers/net/wireless/ath/ath10k/htc.h
49738@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
49739
49740 struct ath10k_htc_ops {
49741 void (*target_send_suspend_complete)(struct ath10k *ar);
49742-};
49743+} __no_const;
49744
49745 struct ath10k_htc_ep_ops {
49746 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
49747 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
49748 void (*ep_tx_credits)(struct ath10k *);
49749-};
49750+} __no_const;
49751
49752 /* service connection information */
49753 struct ath10k_htc_svc_conn_req {
49754diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49755index f816909..e56cd8b 100644
49756--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49757+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49758@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49759 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
49760 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
49761
49762- ACCESS_ONCE(ads->ds_link) = i->link;
49763- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
49764+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
49765+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
49766
49767 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
49768 ctl6 = SM(i->keytype, AR_EncrType);
49769@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49770
49771 if ((i->is_first || i->is_last) &&
49772 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
49773- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
49774+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
49775 | set11nTries(i->rates, 1)
49776 | set11nTries(i->rates, 2)
49777 | set11nTries(i->rates, 3)
49778 | (i->dur_update ? AR_DurUpdateEna : 0)
49779 | SM(0, AR_BurstDur);
49780
49781- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
49782+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
49783 | set11nRate(i->rates, 1)
49784 | set11nRate(i->rates, 2)
49785 | set11nRate(i->rates, 3);
49786 } else {
49787- ACCESS_ONCE(ads->ds_ctl2) = 0;
49788- ACCESS_ONCE(ads->ds_ctl3) = 0;
49789+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
49790+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
49791 }
49792
49793 if (!i->is_first) {
49794- ACCESS_ONCE(ads->ds_ctl0) = 0;
49795- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49796- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49797+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
49798+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49799+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49800 return;
49801 }
49802
49803@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49804 break;
49805 }
49806
49807- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49808+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49809 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49810 | SM(i->txpower[0], AR_XmitPower0)
49811 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49812@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49813 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
49814 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
49815
49816- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49817- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49818+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49819+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49820
49821 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
49822 return;
49823
49824- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49825+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49826 | set11nPktDurRTSCTS(i->rates, 1);
49827
49828- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49829+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49830 | set11nPktDurRTSCTS(i->rates, 3);
49831
49832- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49833+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49834 | set11nRateFlags(i->rates, 1)
49835 | set11nRateFlags(i->rates, 2)
49836 | set11nRateFlags(i->rates, 3)
49837 | SM(i->rtscts_rate, AR_RTSCTSRate);
49838
49839- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49840- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49841- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49842+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49843+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49844+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49845 }
49846
49847 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
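Every store to these TX descriptors is rerouted from ACCESS_ONCE to ACCESS_ONCE_RW; the same substitution is applied to ar9003_mac.c below. Under the constify/KERNEXEC changes elsewhere in this patch, ACCESS_ONCE takes a const-qualified volatile view suitable only for reads, and ACCESS_ONCE_RW is its writable counterpart. A simplified sketch of the pair (verify against this patch's include/linux/compiler.h hunk):

/* Both force exactly one volatile access, so the compiler can neither
 * tear nor elide the store to the hardware-visible descriptor field. */
#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* reads  */
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* writes */

/* usage, as in ar9002_set_txdesc() above:
 *	ACCESS_ONCE_RW(ads->ds_link) = i->link;
 */
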
49848diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49849index da84b70..83e4978 100644
49850--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49851+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49852@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49853 (i->qcu << AR_TxQcuNum_S) | desc_len;
49854
49855 checksum += val;
49856- ACCESS_ONCE(ads->info) = val;
49857+ ACCESS_ONCE_RW(ads->info) = val;
49858
49859 checksum += i->link;
49860- ACCESS_ONCE(ads->link) = i->link;
49861+ ACCESS_ONCE_RW(ads->link) = i->link;
49862
49863 checksum += i->buf_addr[0];
49864- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
49865+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
49866 checksum += i->buf_addr[1];
49867- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
49868+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
49869 checksum += i->buf_addr[2];
49870- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
49871+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
49872 checksum += i->buf_addr[3];
49873- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
49874+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
49875
49876 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
49877- ACCESS_ONCE(ads->ctl3) = val;
49878+ ACCESS_ONCE_RW(ads->ctl3) = val;
49879 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
49880- ACCESS_ONCE(ads->ctl5) = val;
49881+ ACCESS_ONCE_RW(ads->ctl5) = val;
49882 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
49883- ACCESS_ONCE(ads->ctl7) = val;
49884+ ACCESS_ONCE_RW(ads->ctl7) = val;
49885 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
49886- ACCESS_ONCE(ads->ctl9) = val;
49887+ ACCESS_ONCE_RW(ads->ctl9) = val;
49888
49889 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
49890- ACCESS_ONCE(ads->ctl10) = checksum;
49891+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
49892
49893 if (i->is_first || i->is_last) {
49894- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49895+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49896 | set11nTries(i->rates, 1)
49897 | set11nTries(i->rates, 2)
49898 | set11nTries(i->rates, 3)
49899 | (i->dur_update ? AR_DurUpdateEna : 0)
49900 | SM(0, AR_BurstDur);
49901
49902- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49903+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49904 | set11nRate(i->rates, 1)
49905 | set11nRate(i->rates, 2)
49906 | set11nRate(i->rates, 3);
49907 } else {
49908- ACCESS_ONCE(ads->ctl13) = 0;
49909- ACCESS_ONCE(ads->ctl14) = 0;
49910+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49911+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49912 }
49913
49914 ads->ctl20 = 0;
49915@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49916
49917 ctl17 = SM(i->keytype, AR_EncrType);
49918 if (!i->is_first) {
49919- ACCESS_ONCE(ads->ctl11) = 0;
49920- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49921- ACCESS_ONCE(ads->ctl15) = 0;
49922- ACCESS_ONCE(ads->ctl16) = 0;
49923- ACCESS_ONCE(ads->ctl17) = ctl17;
49924- ACCESS_ONCE(ads->ctl18) = 0;
49925- ACCESS_ONCE(ads->ctl19) = 0;
49926+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49927+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49928+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49929+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49930+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49931+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49932+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49933 return;
49934 }
49935
49936- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49937+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49938 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49939 | SM(i->txpower[0], AR_XmitPower0)
49940 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49941@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49942 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49943 ctl12 |= SM(val, AR_PAPRDChainMask);
49944
49945- ACCESS_ONCE(ads->ctl12) = ctl12;
49946- ACCESS_ONCE(ads->ctl17) = ctl17;
49947+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49948+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49949
49950- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49951+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49952 | set11nPktDurRTSCTS(i->rates, 1);
49953
49954- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49955+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49956 | set11nPktDurRTSCTS(i->rates, 3);
49957
49958- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49959+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49960 | set11nRateFlags(i->rates, 1)
49961 | set11nRateFlags(i->rates, 2)
49962 | set11nRateFlags(i->rates, 3)
49963 | SM(i->rtscts_rate, AR_RTSCTSRate);
49964
49965- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49966+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49967
49968- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49969- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49970- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49971+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49972+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49973+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49974 }
49975
49976 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49977diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49978index e82e570..8c3cf90 100644
49979--- a/drivers/net/wireless/ath/ath9k/hw.h
49980+++ b/drivers/net/wireless/ath/ath9k/hw.h
49981@@ -646,7 +646,7 @@ struct ath_hw_private_ops {
49982
49983 /* ANI */
49984 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49985-};
49986+} __no_const;
49987
49988 /**
49989 * struct ath_spec_scan - parameters for Atheros spectral scan
49990@@ -722,7 +722,7 @@ struct ath_hw_ops {
49991 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49992 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49993 #endif
49994-};
49995+} __no_const;
49996
49997 struct ath_nf_limits {
49998 s16 max;
49999diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
50000index 9ede991..a8f08fb 100644
50001--- a/drivers/net/wireless/ath/ath9k/main.c
50002+++ b/drivers/net/wireless/ath/ath9k/main.c
50003@@ -2537,16 +2537,18 @@ void ath9k_fill_chanctx_ops(void)
50004 if (!ath9k_is_chanctx_enabled())
50005 return;
50006
50007- ath9k_ops.hw_scan = ath9k_hw_scan;
50008- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50009- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50010- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50011- ath9k_ops.add_chanctx = ath9k_add_chanctx;
50012- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50013- ath9k_ops.change_chanctx = ath9k_change_chanctx;
50014- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50015- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50016- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50017+ pax_open_kernel();
50018+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
50019+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50020+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50021+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50022+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
50023+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50024+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
50025+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50026+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50027+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50028+ pax_close_kernel();
50029 }
50030
50031 #endif
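ath9k_ops is constified by the plugin, so ath9k_fill_chanctx_ops() can no longer assign its members directly. The pax_open_kernel()/pax_close_kernel() bracket temporarily lifts write protection on read-only kernel data (on x86 by toggling CR0.WP; the exact mechanism is arch-specific), and the *(void **)& casts strip the const qualifier for the one-time writes. The bare pattern, as a hedged sketch with hypothetical names:

struct example_ops {
	void (*hw_scan)(void);
};

static void example_hw_scan(void) { }

static struct example_ops example_ops;	/* constified to r/o data in the real tree */

static void example_enable_chanctx(void)
{
	pax_open_kernel();	/* allow writes to read-only kernel data */
	*(void **)&example_ops.hw_scan = example_hw_scan;	/* cast strips const */
	pax_close_kernel();	/* restore write protection */
}

The same bracket appears repeatedly below (il3945, mac80211_hwsim, wl1251, wl12xx, wl18xx, and the PCI hotplug drivers); it is always a one-time setup write, never a hot path.
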
50032diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50033index 058a9f2..d5cb1ba 100644
50034--- a/drivers/net/wireless/b43/phy_lp.c
50035+++ b/drivers/net/wireless/b43/phy_lp.c
50036@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50037 {
50038 struct ssb_bus *bus = dev->dev->sdev->bus;
50039
50040- static const struct b206x_channel *chandata = NULL;
50041+ const struct b206x_channel *chandata = NULL;
50042 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50043 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50044 u16 old_comm15, scale;
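Dropping static here is a correctness fix as much as hardening: a function-scope static means one persistent slot shared by every caller, so concurrent tuning on two devices could race through the same chandata pointer, and the slot occupies .data for no benefit. Without static it is an ordinary per-call local:

/* Before: a single shared, persistent slot for all callers. */
static const struct b206x_channel *chandata = NULL;	/* lives in .data    */
/* After: a fresh local, re-initialized on every invocation. */
const struct b206x_channel *chandata = NULL;		/* lives on the stack */

The same change is made below for nfcwilink_probe()'s drv pointer and msi_wmi_notify()'s key pointer.
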
50045diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50046index e566580..2c218ca 100644
50047--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50048+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50049@@ -3631,7 +3631,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50050 */
50051 if (il3945_mod_params.disable_hw_scan) {
50052 D_INFO("Disabling hw_scan\n");
50053- il3945_mac_ops.hw_scan = NULL;
50054+ pax_open_kernel();
50055+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50056+ pax_close_kernel();
50057 }
50058
50059 D_INFO("*** LOAD DRIVER ***\n");
50060diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50061index 0ffb6ff..c0b7f0e 100644
50062--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50063+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50064@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50065 {
50066 struct iwl_priv *priv = file->private_data;
50067 char buf[64];
50068- int buf_size;
50069+ size_t buf_size;
50070 u32 offset, len;
50071
50072 memset(buf, 0, sizeof(buf));
50073@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50074 struct iwl_priv *priv = file->private_data;
50075
50076 char buf[8];
50077- int buf_size;
50078+ size_t buf_size;
50079 u32 reset_flag;
50080
50081 memset(buf, 0, sizeof(buf));
50082@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50083 {
50084 struct iwl_priv *priv = file->private_data;
50085 char buf[8];
50086- int buf_size;
50087+ size_t buf_size;
50088 int ht40;
50089
50090 memset(buf, 0, sizeof(buf));
50091@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50092 {
50093 struct iwl_priv *priv = file->private_data;
50094 char buf[8];
50095- int buf_size;
50096+ size_t buf_size;
50097 int value;
50098
50099 memset(buf, 0, sizeof(buf));
50100@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50101 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50102 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50103
50104-static const char *fmt_value = " %-30s %10u\n";
50105-static const char *fmt_hex = " %-30s 0x%02X\n";
50106-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50107-static const char *fmt_header =
50108+static const char fmt_value[] = " %-30s %10u\n";
50109+static const char fmt_hex[] = " %-30s 0x%02X\n";
50110+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50111+static const char fmt_header[] =
50112 "%-32s current cumulative delta max\n";
50113
50114 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50115@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50116 {
50117 struct iwl_priv *priv = file->private_data;
50118 char buf[8];
50119- int buf_size;
50120+ size_t buf_size;
50121 int clear;
50122
50123 memset(buf, 0, sizeof(buf));
50124@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50125 {
50126 struct iwl_priv *priv = file->private_data;
50127 char buf[8];
50128- int buf_size;
50129+ size_t buf_size;
50130 int trace;
50131
50132 memset(buf, 0, sizeof(buf));
50133@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50134 {
50135 struct iwl_priv *priv = file->private_data;
50136 char buf[8];
50137- int buf_size;
50138+ size_t buf_size;
50139 int missed;
50140
50141 memset(buf, 0, sizeof(buf));
50142@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50143
50144 struct iwl_priv *priv = file->private_data;
50145 char buf[8];
50146- int buf_size;
50147+ size_t buf_size;
50148 int plcp;
50149
50150 memset(buf, 0, sizeof(buf));
50151@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50152
50153 struct iwl_priv *priv = file->private_data;
50154 char buf[8];
50155- int buf_size;
50156+ size_t buf_size;
50157 int flush;
50158
50159 memset(buf, 0, sizeof(buf));
50160@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50161
50162 struct iwl_priv *priv = file->private_data;
50163 char buf[8];
50164- int buf_size;
50165+ size_t buf_size;
50166 int rts;
50167
50168 if (!priv->cfg->ht_params)
50169@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50170 {
50171 struct iwl_priv *priv = file->private_data;
50172 char buf[8];
50173- int buf_size;
50174+ size_t buf_size;
50175
50176 memset(buf, 0, sizeof(buf));
50177 buf_size = min(count, sizeof(buf) - 1);
50178@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50179 struct iwl_priv *priv = file->private_data;
50180 u32 event_log_flag;
50181 char buf[8];
50182- int buf_size;
50183+ size_t buf_size;
50184
50185 /* check that the interface is up */
50186 if (!iwl_is_ready(priv))
50187@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50188 struct iwl_priv *priv = file->private_data;
50189 char buf[8];
50190 u32 calib_disabled;
50191- int buf_size;
50192+ size_t buf_size;
50193
50194 memset(buf, 0, sizeof(buf));
50195 buf_size = min(count, sizeof(buf) - 1);
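All of these debugfs write handlers share one shape: clamp the user-supplied count against a small stack buffer, copy, then parse. The locals are widened from int to size_t so the clamp stays unsigned end to end; count and sizeof() are both size_t, and keeping buf_size signed invites truncation and trips the signedness analysis of the size_overflow plugin added elsewhere in this patch. The common pattern with the corrected type (a sketch of the handlers above, not new code):

static ssize_t example_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[8];
	size_t buf_size;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);	/* unsigned clamp, always <= 7 */
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	/* ...parse buf... */
	return count;
}
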
50196diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50197index 69935aa..c1ca128 100644
50198--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50199+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50200@@ -1836,7 +1836,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50201 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50202
50203 char buf[8];
50204- int buf_size;
50205+ size_t buf_size;
50206 u32 reset_flag;
50207
50208 memset(buf, 0, sizeof(buf));
50209@@ -1857,7 +1857,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50210 {
50211 struct iwl_trans *trans = file->private_data;
50212 char buf[8];
50213- int buf_size;
50214+ size_t buf_size;
50215 int csr;
50216
50217 memset(buf, 0, sizeof(buf));
50218diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50219index 8908be6..fe97ddd 100644
50220--- a/drivers/net/wireless/mac80211_hwsim.c
50221+++ b/drivers/net/wireless/mac80211_hwsim.c
50222@@ -3070,20 +3070,20 @@ static int __init init_mac80211_hwsim(void)
50223 if (channels < 1)
50224 return -EINVAL;
50225
50226- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50227- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50228- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50229- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50230- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50231- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50232- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50233- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50234- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50235- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50236- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50237- mac80211_hwsim_assign_vif_chanctx;
50238- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50239- mac80211_hwsim_unassign_vif_chanctx;
50240+ pax_open_kernel();
50241+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50242+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50243+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50244+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50245+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50246+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50247+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50248+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50249+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50250+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50251+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50252+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50253+ pax_close_kernel();
50254
50255 spin_lock_init(&hwsim_radio_lock);
50256 INIT_LIST_HEAD(&hwsim_radios);
50257diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50258index 60d44ce..884dd1c 100644
50259--- a/drivers/net/wireless/rndis_wlan.c
50260+++ b/drivers/net/wireless/rndis_wlan.c
50261@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50262
50263 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50264
50265- if (rts_threshold < 0 || rts_threshold > 2347)
50266+ if (rts_threshold > 2347)
50267 rts_threshold = 2347;
50268
50269 tmp = cpu_to_le32(rts_threshold);
50270diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50271index 9bb398b..b0cc047 100644
50272--- a/drivers/net/wireless/rt2x00/rt2x00.h
50273+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50274@@ -375,7 +375,7 @@ struct rt2x00_intf {
50275 * for hardware which doesn't support hardware
50276 * sequence counting.
50277 */
50278- atomic_t seqno;
50279+ atomic_unchecked_t seqno;
50280 };
50281
50282 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50283diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50284index 68b620b..92ecd9e 100644
50285--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50286+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50287@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50288 * sequence counter given by mac80211.
50289 */
50290 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50291- seqno = atomic_add_return(0x10, &intf->seqno);
50292+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50293 else
50294- seqno = atomic_read(&intf->seqno);
50295+ seqno = atomic_read_unchecked(&intf->seqno);
50296
50297 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50298 hdr->seq_ctrl |= cpu_to_le16(seqno);
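PaX's REFCOUNT feature makes ordinary atomic_t operations trap on overflow to catch reference-count leaks. The 802.11 sequence number is a counter that is supposed to wrap, so rt2x00 moves it to atomic_unchecked_t, whose helpers perform the same operation minus the overflow trap. Roughly, as an illustrative sketch (the real definitions are per-arch):

typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return ACCESS_ONCE(v->counter);
}

/* atomic_add_return_unchecked() mirrors atomic_add_return() but omits
 * the PAX_REFCOUNT overflow check, which is exactly right for a
 * sequence counter whose wraparound is benign. */

The oprofile statistics counters further below get the same treatment for the same reason.
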
50299diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50300index b661f896..ddf7d2b 100644
50301--- a/drivers/net/wireless/ti/wl1251/sdio.c
50302+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50303@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50304
50305 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50306
50307- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50308- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50309+ pax_open_kernel();
50310+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50311+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50312+ pax_close_kernel();
50313
50314 wl1251_info("using dedicated interrupt line");
50315 } else {
50316- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50317- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50318+ pax_open_kernel();
50319+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50320+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50321+ pax_close_kernel();
50322
50323 wl1251_info("using SDIO interrupt");
50324 }
50325diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50326index 144d1f8..7030936 100644
50327--- a/drivers/net/wireless/ti/wl12xx/main.c
50328+++ b/drivers/net/wireless/ti/wl12xx/main.c
50329@@ -657,7 +657,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50330 sizeof(wl->conf.mem));
50331
50332 /* read data preparation is only needed by wl127x */
50333- wl->ops->prepare_read = wl127x_prepare_read;
50334+ pax_open_kernel();
50335+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50336+ pax_close_kernel();
50337
50338 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50339 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50340@@ -682,7 +684,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50341 sizeof(wl->conf.mem));
50342
50343 /* read data preparation is only needed by wl127x */
50344- wl->ops->prepare_read = wl127x_prepare_read;
50345+ pax_open_kernel();
50346+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50347+ pax_close_kernel();
50348
50349 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50350 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50351diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50352index 717c4f5..a813aeb 100644
50353--- a/drivers/net/wireless/ti/wl18xx/main.c
50354+++ b/drivers/net/wireless/ti/wl18xx/main.c
50355@@ -1923,8 +1923,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50356 }
50357
50358 if (!checksum_param) {
50359- wl18xx_ops.set_rx_csum = NULL;
50360- wl18xx_ops.init_vif = NULL;
50361+ pax_open_kernel();
50362+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
50363+ *(void **)&wl18xx_ops.init_vif = NULL;
50364+ pax_close_kernel();
50365 }
50366
50367 /* Enable 11a Band only if we have 5G antennas */
50368diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50369index a912dc0..a8225ba 100644
50370--- a/drivers/net/wireless/zd1211rw/zd_usb.c
50371+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50372@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50373 {
50374 struct zd_usb *usb = urb->context;
50375 struct zd_usb_interrupt *intr = &usb->intr;
50376- int len;
50377+ unsigned int len;
50378 u16 int_num;
50379
50380 ZD_ASSERT(in_interrupt());
50381diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50382index ce2e2cf..f81e500 100644
50383--- a/drivers/nfc/nfcwilink.c
50384+++ b/drivers/nfc/nfcwilink.c
50385@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50386
50387 static int nfcwilink_probe(struct platform_device *pdev)
50388 {
50389- static struct nfcwilink *drv;
50390+ struct nfcwilink *drv;
50391 int rc;
50392 __u32 protocols;
50393
50394diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
50395index 24d3d24..ff70d28 100644
50396--- a/drivers/nfc/st21nfca/st21nfca.c
50397+++ b/drivers/nfc/st21nfca/st21nfca.c
50398@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
50399 goto exit;
50400 }
50401
50402- gate = uid_skb->data;
50403+ memcpy(gate, uid_skb->data, uid_skb->len);
50404 *len = uid_skb->len;
50405 exit:
50406 kfree_skb(uid_skb);
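The original line assigned uid_skb->data to gate, but gate is a by-value pointer parameter: reseating it changes only the callee's copy, and the caller's buffer is never filled. Copying the bytes through the pointer is the visible fix. The bug class in miniature, with hypothetical names:

static u8 src[4] = { 0xde, 0xad, 0xbe, 0xef };

static void fill_broken(u8 *out)
{
	out = src;			/* reseats the local copy; caller sees nothing */
}

static void fill_fixed(u8 *out, size_t n)
{
	memcpy(out, src, n);		/* writes through the caller's buffer */
}
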
50407diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
50408index 3a896c9..ac7b1c8 100644
50409--- a/drivers/of/fdt.c
50410+++ b/drivers/of/fdt.c
50411@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
50412 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
50413 return 0;
50414 }
50415- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50416+ pax_open_kernel();
50417+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50418+ pax_close_kernel();
50419 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
50420 }
50421 late_initcall(of_fdt_raw_init);
50422diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50423index d93b2b6..ae50401 100644
50424--- a/drivers/oprofile/buffer_sync.c
50425+++ b/drivers/oprofile/buffer_sync.c
50426@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50427 if (cookie == NO_COOKIE)
50428 offset = pc;
50429 if (cookie == INVALID_COOKIE) {
50430- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50431+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50432 offset = pc;
50433 }
50434 if (cookie != last_cookie) {
50435@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50436 /* add userspace sample */
50437
50438 if (!mm) {
50439- atomic_inc(&oprofile_stats.sample_lost_no_mm);
50440+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50441 return 0;
50442 }
50443
50444 cookie = lookup_dcookie(mm, s->eip, &offset);
50445
50446 if (cookie == INVALID_COOKIE) {
50447- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50448+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50449 return 0;
50450 }
50451
50452@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
50453 /* ignore backtraces if failed to add a sample */
50454 if (state == sb_bt_start) {
50455 state = sb_bt_ignore;
50456- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50457+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50458 }
50459 }
50460 release_mm(mm);
50461diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50462index c0cc4e7..44d4e54 100644
50463--- a/drivers/oprofile/event_buffer.c
50464+++ b/drivers/oprofile/event_buffer.c
50465@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50466 }
50467
50468 if (buffer_pos == buffer_size) {
50469- atomic_inc(&oprofile_stats.event_lost_overflow);
50470+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50471 return;
50472 }
50473
50474diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50475index ed2c3ec..deda85a 100644
50476--- a/drivers/oprofile/oprof.c
50477+++ b/drivers/oprofile/oprof.c
50478@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50479 if (oprofile_ops.switch_events())
50480 return;
50481
50482- atomic_inc(&oprofile_stats.multiplex_counter);
50483+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50484 start_switch_worker();
50485 }
50486
50487diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50488index ee2cfce..7f8f699 100644
50489--- a/drivers/oprofile/oprofile_files.c
50490+++ b/drivers/oprofile/oprofile_files.c
50491@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50492
50493 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50494
50495-static ssize_t timeout_read(struct file *file, char __user *buf,
50496+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50497 size_t count, loff_t *offset)
50498 {
50499 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50500diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50501index 59659ce..6c860a0 100644
50502--- a/drivers/oprofile/oprofile_stats.c
50503+++ b/drivers/oprofile/oprofile_stats.c
50504@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50505 cpu_buf->sample_invalid_eip = 0;
50506 }
50507
50508- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50509- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50510- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50511- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50512- atomic_set(&oprofile_stats.multiplex_counter, 0);
50513+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50514+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50515+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50516+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50517+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50518 }
50519
50520
50521diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50522index 1fc622b..8c48fc3 100644
50523--- a/drivers/oprofile/oprofile_stats.h
50524+++ b/drivers/oprofile/oprofile_stats.h
50525@@ -13,11 +13,11 @@
50526 #include <linux/atomic.h>
50527
50528 struct oprofile_stat_struct {
50529- atomic_t sample_lost_no_mm;
50530- atomic_t sample_lost_no_mapping;
50531- atomic_t bt_lost_no_mapping;
50532- atomic_t event_lost_overflow;
50533- atomic_t multiplex_counter;
50534+ atomic_unchecked_t sample_lost_no_mm;
50535+ atomic_unchecked_t sample_lost_no_mapping;
50536+ atomic_unchecked_t bt_lost_no_mapping;
50537+ atomic_unchecked_t event_lost_overflow;
50538+ atomic_unchecked_t multiplex_counter;
50539 };
50540
50541 extern struct oprofile_stat_struct oprofile_stats;
50542diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50543index 3f49345..c750d0b 100644
50544--- a/drivers/oprofile/oprofilefs.c
50545+++ b/drivers/oprofile/oprofilefs.c
50546@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50547
50548 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50549 {
50550- atomic_t *val = file->private_data;
50551- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50552+ atomic_unchecked_t *val = file->private_data;
50553+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50554 }
50555
50556
50557@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50558
50559
50560 int oprofilefs_create_ro_atomic(struct dentry *root,
50561- char const *name, atomic_t *val)
50562+ char const *name, atomic_unchecked_t *val)
50563 {
50564 return __oprofilefs_create_file(root, name,
50565 &atomic_ro_fops, 0444, val);
50566diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50567index bdef916..88c7dee 100644
50568--- a/drivers/oprofile/timer_int.c
50569+++ b/drivers/oprofile/timer_int.c
50570@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50571 return NOTIFY_OK;
50572 }
50573
50574-static struct notifier_block __refdata oprofile_cpu_notifier = {
50575+static struct notifier_block oprofile_cpu_notifier = {
50576 .notifier_call = oprofile_cpu_notify,
50577 };
50578
50579diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50580index 3b47080..6cd05dd 100644
50581--- a/drivers/parport/procfs.c
50582+++ b/drivers/parport/procfs.c
50583@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50584
50585 *ppos += len;
50586
50587- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50588+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50589 }
50590
50591 #ifdef CONFIG_PARPORT_1284
50592@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50593
50594 *ppos += len;
50595
50596- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50597+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50598 }
50599 #endif /* IEEE1284.3 support. */
50600
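Both procfs read handlers above format into a fixed stack buffer and then copy len bytes to userspace; the added len > sizeof buffer guard turns a miscomputed length into a clean -EFAULT instead of a copy_to_user() that walks past the stack buffer, and it satisfies the fortified copy checks added elsewhere in this patch. Condensed shape, with hypothetical locals value and result:

char buffer[256];
int len = 0;

len += sprintf(buffer, "%d\n", value);	/* as in do_active_device() */
if (len > sizeof buffer || copy_to_user(result, buffer, len))
	return -EFAULT;
return 0;
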
50601diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
50602index ba46e58..90cfc24 100644
50603--- a/drivers/pci/host/pci-host-generic.c
50604+++ b/drivers/pci/host/pci-host-generic.c
50605@@ -26,9 +26,9 @@
50606 #include <linux/platform_device.h>
50607
50608 struct gen_pci_cfg_bus_ops {
50609+ struct pci_ops ops;
50610 u32 bus_shift;
50611- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
50612-};
50613+} __do_const;
50614
50615 struct gen_pci_cfg_windows {
50616 struct resource res;
50617@@ -56,8 +56,12 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
50618 }
50619
50620 static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
50621+ .ops = {
50622+ .map_bus = gen_pci_map_cfg_bus_cam,
50623+ .read = pci_generic_config_read,
50624+ .write = pci_generic_config_write,
50625+ },
50626 .bus_shift = 16,
50627- .map_bus = gen_pci_map_cfg_bus_cam,
50628 };
50629
50630 static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50631@@ -72,13 +76,12 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50632 }
50633
50634 static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
50635+ .ops = {
50636+ .map_bus = gen_pci_map_cfg_bus_ecam,
50637+ .read = pci_generic_config_read,
50638+ .write = pci_generic_config_write,
50639+ },
50640 .bus_shift = 20,
50641- .map_bus = gen_pci_map_cfg_bus_ecam,
50642-};
50643-
50644-static struct pci_ops gen_pci_ops = {
50645- .read = pci_generic_config_read,
50646- .write = pci_generic_config_write,
50647 };
50648
50649 static const struct of_device_id gen_pci_of_match[] = {
50650@@ -219,7 +222,6 @@ static int gen_pci_probe(struct platform_device *pdev)
50651 .private_data = (void **)&pci,
50652 .setup = gen_pci_setup,
50653 .map_irq = of_irq_parse_and_map_pci,
50654- .ops = &gen_pci_ops,
50655 };
50656
50657 if (!pci)
50658@@ -241,7 +243,7 @@ static int gen_pci_probe(struct platform_device *pdev)
50659
50660 of_id = of_match_node(gen_pci_of_match, np);
50661 pci->cfg.ops = of_id->data;
50662- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
50663+ hw.ops = &pci->cfg.ops->ops;
50664 pci->host.dev.parent = dev;
50665 INIT_LIST_HEAD(&pci->host.windows);
50666 INIT_LIST_HEAD(&pci->resources);
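This restructuring removes the only runtime write the driver made to an ops table: instead of patching the shared gen_pci_ops.map_bus at probe time, each cfg-bus variant now embeds a complete, statically initialized pci_ops, and __do_const asks the constify plugin to force the containing struct read-only even though it also holds a plain u32 member. Probe then merely selects a pointer:

of_id = of_match_node(gen_pci_of_match, np);
pci->cfg.ops = of_id->data;	/* CAM or ECAM variant, both const */
hw.ops = &pci->cfg.ops->ops;	/* no write into any ops structure */
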
50667diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50668index 6ca2399..68d866b 100644
50669--- a/drivers/pci/hotplug/acpiphp_ibm.c
50670+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50671@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50672 goto init_cleanup;
50673 }
50674
50675- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50676+ pax_open_kernel();
50677+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50678+ pax_close_kernel();
50679 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50680
50681 return retval;
50682diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50683index 66b7bbe..26bee78 100644
50684--- a/drivers/pci/hotplug/cpcihp_generic.c
50685+++ b/drivers/pci/hotplug/cpcihp_generic.c
50686@@ -73,7 +73,6 @@ static u16 port;
50687 static unsigned int enum_bit;
50688 static u8 enum_mask;
50689
50690-static struct cpci_hp_controller_ops generic_hpc_ops;
50691 static struct cpci_hp_controller generic_hpc;
50692
50693 static int __init validate_parameters(void)
50694@@ -139,6 +138,10 @@ static int query_enum(void)
50695 return ((value & enum_mask) == enum_mask);
50696 }
50697
50698+static struct cpci_hp_controller_ops generic_hpc_ops = {
50699+ .query_enum = query_enum,
50700+};
50701+
50702 static int __init cpcihp_generic_init(void)
50703 {
50704 int status;
50705@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50706 pci_dev_put(dev);
50707
50708 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50709- generic_hpc_ops.query_enum = query_enum;
50710 generic_hpc.ops = &generic_hpc_ops;
50711
50712 status = cpci_hp_register_controller(&generic_hpc);
50713diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50714index 7ecf34e..effed62 100644
50715--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50716+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50717@@ -59,7 +59,6 @@
50718 /* local variables */
50719 static bool debug;
50720 static bool poll;
50721-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50722 static struct cpci_hp_controller zt5550_hpc;
50723
50724 /* Primary cPCI bus bridge device */
50725@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
50726 return 0;
50727 }
50728
50729+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50730+ .query_enum = zt5550_hc_query_enum,
50731+};
50732+
50733 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50734 {
50735 int status;
50736@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
50737 dbg("returned from zt5550_hc_config");
50738
50739 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
50740- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
50741 zt5550_hpc.ops = &zt5550_hpc_ops;
50742 if (!poll) {
50743 zt5550_hpc.irq = hc_dev->irq;
50744 zt5550_hpc.irq_flags = IRQF_SHARED;
50745 zt5550_hpc.dev_id = hc_dev;
50746
50747- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50748- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50749- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50750+ pax_open_kernel();
50751+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50752+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50753+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50754+ pax_open_kernel();
50754+ pax_close_kernel();
50755 } else {
50756 info("using ENUM# polling mode");
50757 }
50758diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
50759index 1e08ff8c..3cd145f 100644
50760--- a/drivers/pci/hotplug/cpqphp_nvram.c
50761+++ b/drivers/pci/hotplug/cpqphp_nvram.c
50762@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
50763
50764 void compaq_nvram_init (void __iomem *rom_start)
50765 {
50766+#ifndef CONFIG_PAX_KERNEXEC
50767 if (rom_start)
50768 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
50769+#endif
50770
50771 dbg("int15 entry = %p\n", compaq_int15_entry_point);
50772
50773diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
50774index 56d8486..f26113f 100644
50775--- a/drivers/pci/hotplug/pci_hotplug_core.c
50776+++ b/drivers/pci/hotplug/pci_hotplug_core.c
50777@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
50778 return -EINVAL;
50779 }
50780
50781- slot->ops->owner = owner;
50782- slot->ops->mod_name = mod_name;
50783+ pax_open_kernel();
50784+ *(struct module **)&slot->ops->owner = owner;
50785+ *(const char **)&slot->ops->mod_name = mod_name;
50786+ pax_close_kernel();
50787
50788 mutex_lock(&pci_hp_mutex);
50789 /*
50790diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
50791index 07aa722..84514b4 100644
50792--- a/drivers/pci/hotplug/pciehp_core.c
50793+++ b/drivers/pci/hotplug/pciehp_core.c
50794@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
50795 struct slot *slot = ctrl->slot;
50796 struct hotplug_slot *hotplug = NULL;
50797 struct hotplug_slot_info *info = NULL;
50798- struct hotplug_slot_ops *ops = NULL;
50799+ hotplug_slot_ops_no_const *ops = NULL;
50800 char name[SLOT_NAME_SIZE];
50801 int retval = -ENOMEM;
50802
50803diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
50804index c3e7dfc..cbd9625 100644
50805--- a/drivers/pci/msi.c
50806+++ b/drivers/pci/msi.c
50807@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
50808 {
50809 struct attribute **msi_attrs;
50810 struct attribute *msi_attr;
50811- struct device_attribute *msi_dev_attr;
50812- struct attribute_group *msi_irq_group;
50813+ device_attribute_no_const *msi_dev_attr;
50814+ attribute_group_no_const *msi_irq_group;
50815 const struct attribute_group **msi_irq_groups;
50816 struct msi_desc *entry;
50817 int ret = -ENOMEM;
50818@@ -573,7 +573,7 @@ error_attrs:
50819 count = 0;
50820 msi_attr = msi_attrs[count];
50821 while (msi_attr) {
50822- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
50823+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
50824 kfree(msi_attr->name);
50825 kfree(msi_dev_attr);
50826 ++count;
50827diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
50828index 312f23a..d21181c 100644
50829--- a/drivers/pci/pci-sysfs.c
50830+++ b/drivers/pci/pci-sysfs.c
50831@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
50832 {
50833 /* allocate attribute structure, piggyback attribute name */
50834 int name_len = write_combine ? 13 : 10;
50835- struct bin_attribute *res_attr;
50836+ bin_attribute_no_const *res_attr;
50837 int retval;
50838
50839 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
50840@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
50841 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
50842 {
50843 int retval;
50844- struct bin_attribute *attr;
50845+ bin_attribute_no_const *attr;
50846
50847 /* If the device has VPD, try to expose it in sysfs. */
50848 if (dev->vpd) {
50849@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
50850 {
50851 int retval;
50852 int rom_size = 0;
50853- struct bin_attribute *attr;
50854+ bin_attribute_no_const *attr;
50855
50856 if (!sysfs_initialized)
50857 return -EACCES;
50858diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
50859index 4091f82..7d98eef 100644
50860--- a/drivers/pci/pci.h
50861+++ b/drivers/pci/pci.h
50862@@ -99,7 +99,7 @@ struct pci_vpd_ops {
50863 struct pci_vpd {
50864 unsigned int len;
50865 const struct pci_vpd_ops *ops;
50866- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
50867+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
50868 };
50869
50870 int pci_vpd_pci22_init(struct pci_dev *dev);
50871diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
50872index 820740a..8b1c673 100644
50873--- a/drivers/pci/pcie/aspm.c
50874+++ b/drivers/pci/pcie/aspm.c
50875@@ -27,9 +27,9 @@
50876 #define MODULE_PARAM_PREFIX "pcie_aspm."
50877
50878 /* Note: those are not register definitions */
50879-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
50880-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
50881-#define ASPM_STATE_L1 (4) /* L1 state */
50882+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
50883+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
50884+#define ASPM_STATE_L1 (4U) /* L1 state */
50885 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
50886 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
50887
50888diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
50889index be35da2..ec16cdb 100644
50890--- a/drivers/pci/pcie/portdrv_pci.c
50891+++ b/drivers/pci/pcie/portdrv_pci.c
50892@@ -324,7 +324,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
50893 return 0;
50894 }
50895
50896-static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
50897+static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
50898 /*
50899 * Boxes that should not use MSI for PCIe PME signaling.
50900 */
50901diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
50902index 8d2f400..c97cc91 100644
50903--- a/drivers/pci/probe.c
50904+++ b/drivers/pci/probe.c
50905@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
50906 u16 orig_cmd;
50907 struct pci_bus_region region, inverted_region;
50908
50909- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
50910+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
50911
50912 /* No printks while decoding is disabled! */
50913 if (!dev->mmio_always_on) {
50914diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50915index 3f155e7..0f4b1f0 100644
50916--- a/drivers/pci/proc.c
50917+++ b/drivers/pci/proc.c
50918@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50919 static int __init pci_proc_init(void)
50920 {
50921 struct pci_dev *dev = NULL;
50922+
50923+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50924+#ifdef CONFIG_GRKERNSEC_PROC_USER
50925+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50926+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50927+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50928+#endif
50929+#else
50930 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50931+#endif
50932 proc_create("devices", 0, proc_bus_pci_dir,
50933 &proc_bus_pci_dev_operations);
50934 proc_initialized = 1;
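Under GRKERNSEC_PROC the bus/pci directory itself is created with restricted modes so unprivileged users cannot enumerate PCI devices: root-only with PROC_USER, readable by the configured proc group with PROC_USERGROUP. In octal, the two hardened calls above amount to:

proc_mkdir_mode("bus/pci", 0500, NULL);	/* S_IRUSR | S_IXUSR                     */
proc_mkdir_mode("bus/pci", 0550, NULL);	/* ... plus S_IRGRP | S_IXGRP for group  */
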
50935diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50936index b84fdd6..b89d829 100644
50937--- a/drivers/platform/chrome/chromeos_laptop.c
50938+++ b/drivers/platform/chrome/chromeos_laptop.c
50939@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50940 .callback = chromeos_laptop_dmi_matched, \
50941 .driver_data = (void *)&board_
50942
50943-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50944+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50945 {
50946 .ident = "Samsung Series 5 550",
50947 .matches = {
50948diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
50949index 3474920..acc9581 100644
50950--- a/drivers/platform/chrome/chromeos_pstore.c
50951+++ b/drivers/platform/chrome/chromeos_pstore.c
50952@@ -13,7 +13,7 @@
50953 #include <linux/platform_device.h>
50954 #include <linux/pstore_ram.h>
50955
50956-static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
50957+static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
50958 {
50959 /*
50960 * Today all Chromebooks/boxes ship with Google_* as version and
50961diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50962index 1e1e594..8fe59c5 100644
50963--- a/drivers/platform/x86/alienware-wmi.c
50964+++ b/drivers/platform/x86/alienware-wmi.c
50965@@ -150,7 +150,7 @@ struct wmax_led_args {
50966 } __packed;
50967
50968 static struct platform_device *platform_device;
50969-static struct device_attribute *zone_dev_attrs;
50970+static device_attribute_no_const *zone_dev_attrs;
50971 static struct attribute **zone_attrs;
50972 static struct platform_zone *zone_data;
50973
50974@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
50975 }
50976 };
50977
50978-static struct attribute_group zone_attribute_group = {
50979+static attribute_group_no_const zone_attribute_group = {
50980 .name = "rgb_zones",
50981 };
50982
50983diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
50984index 7543a56..367ca8ed 100644
50985--- a/drivers/platform/x86/asus-wmi.c
50986+++ b/drivers/platform/x86/asus-wmi.c
50987@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
50988 int err;
50989 u32 retval = -1;
50990
50991+#ifdef CONFIG_GRKERNSEC_KMEM
50992+ return -EPERM;
50993+#endif
50994+
50995 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
50996
50997 if (err < 0)
50998@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
50999 int err;
51000 u32 retval = -1;
51001
51002+#ifdef CONFIG_GRKERNSEC_KMEM
51003+ return -EPERM;
51004+#endif
51005+
51006 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51007 &retval);
51008
51009@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
51010 union acpi_object *obj;
51011 acpi_status status;
51012
51013+#ifdef CONFIG_GRKERNSEC_KMEM
51014+ return -EPERM;
51015+#endif
51016+
51017 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51018 1, asus->debug.method_id,
51019 &input, &output);
51020diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
51021index bceb30b..bf063d4 100644
51022--- a/drivers/platform/x86/compal-laptop.c
51023+++ b/drivers/platform/x86/compal-laptop.c
51024@@ -766,7 +766,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
51025 return 1;
51026 }
51027
51028-static struct dmi_system_id __initdata compal_dmi_table[] = {
51029+static const struct dmi_system_id __initconst compal_dmi_table[] = {
51030 {
51031 .ident = "FL90/IFL90",
51032 .matches = {
51033diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
51034index 458e6c9..089aee7 100644
51035--- a/drivers/platform/x86/hdaps.c
51036+++ b/drivers/platform/x86/hdaps.c
51037@@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
51038 "ThinkPad T42p", so the order of the entries matters.
51039 If your ThinkPad is not recognized, please update to latest
51040 BIOS. This is especially the case for some R52 ThinkPads. */
51041-static struct dmi_system_id __initdata hdaps_whitelist[] = {
51042+static const struct dmi_system_id __initconst hdaps_whitelist[] = {
51043 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
51044 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
51045 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
51046diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
51047index 97c2be1..2ee50ce 100644
51048--- a/drivers/platform/x86/ibm_rtl.c
51049+++ b/drivers/platform/x86/ibm_rtl.c
51050@@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
51051 }
51052
51053
51054-static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
51055+static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
51056 { \
51057 .matches = { \
51058 DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
51059diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
51060index a4a4258..a58a04c 100644
51061--- a/drivers/platform/x86/intel_oaktrail.c
51062+++ b/drivers/platform/x86/intel_oaktrail.c
51063@@ -298,7 +298,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
51064 return 0;
51065 }
51066
51067-static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
51068+static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
51069 {
51070 .ident = "OakTrail platform",
51071 .matches = {
51072diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51073index 0859877..59d596d 100644
51074--- a/drivers/platform/x86/msi-laptop.c
51075+++ b/drivers/platform/x86/msi-laptop.c
51076@@ -604,7 +604,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
51077 return 1;
51078 }
51079
51080-static struct dmi_system_id __initdata msi_dmi_table[] = {
51081+static const struct dmi_system_id __initconst msi_dmi_table[] = {
51082 {
51083 .ident = "MSI S270",
51084 .matches = {
51085@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51086
51087 if (!quirks->ec_read_only) {
51088 /* allow userland write sysfs file */
51089- dev_attr_bluetooth.store = store_bluetooth;
51090- dev_attr_wlan.store = store_wlan;
51091- dev_attr_threeg.store = store_threeg;
51092- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51093- dev_attr_wlan.attr.mode |= S_IWUSR;
51094- dev_attr_threeg.attr.mode |= S_IWUSR;
51095+ pax_open_kernel();
51096+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51097+ *(void **)&dev_attr_wlan.store = store_wlan;
51098+ *(void **)&dev_attr_threeg.store = store_threeg;
51099+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51100+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51101+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51102+ pax_close_kernel();
51103 }
51104
51105 /* disable hardware control by fn key */
51106diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51107index 6d2bac0..ec2b029 100644
51108--- a/drivers/platform/x86/msi-wmi.c
51109+++ b/drivers/platform/x86/msi-wmi.c
51110@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51111 static void msi_wmi_notify(u32 value, void *context)
51112 {
51113 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51114- static struct key_entry *key;
51115+ struct key_entry *key;
51116 union acpi_object *obj;
51117 acpi_status status;
51118
51119diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
51120index 9e701b2..c68a7b5 100644
51121--- a/drivers/platform/x86/samsung-laptop.c
51122+++ b/drivers/platform/x86/samsung-laptop.c
51123@@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
51124 return 0;
51125 }
51126
51127-static struct dmi_system_id __initdata samsung_dmi_table[] = {
51128+static const struct dmi_system_id __initconst samsung_dmi_table[] = {
51129 {
51130 .matches = {
51131 DMI_MATCH(DMI_SYS_VENDOR,
51132diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
51133index e6aac72..e11ff24 100644
51134--- a/drivers/platform/x86/samsung-q10.c
51135+++ b/drivers/platform/x86/samsung-q10.c
51136@@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
51137 return 1;
51138 }
51139
51140-static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
51141+static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
51142 {
51143 .ident = "Samsung Q10",
51144 .matches = {
51145diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51146index e51c1e7..71bb385 100644
51147--- a/drivers/platform/x86/sony-laptop.c
51148+++ b/drivers/platform/x86/sony-laptop.c
51149@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51150 }
51151
51152 /* High speed charging function */
51153-static struct device_attribute *hsc_handle;
51154+static device_attribute_no_const *hsc_handle;
51155
51156 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51157 struct device_attribute *attr,
51158@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51159 }
51160
51161 /* low battery function */
51162-static struct device_attribute *lowbatt_handle;
51163+static device_attribute_no_const *lowbatt_handle;
51164
51165 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51166 struct device_attribute *attr,
51167@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51168 }
51169
51170 /* fan speed function */
51171-static struct device_attribute *fan_handle, *hsf_handle;
51172+static device_attribute_no_const *fan_handle, *hsf_handle;
51173
51174 static ssize_t sony_nc_hsfan_store(struct device *dev,
51175 struct device_attribute *attr,
51176@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51177 }
51178
51179 /* USB charge function */
51180-static struct device_attribute *uc_handle;
51181+static device_attribute_no_const *uc_handle;
51182
51183 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51184 struct device_attribute *attr,
51185@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51186 }
51187
51188 /* Panel ID function */
51189-static struct device_attribute *panel_handle;
51190+static device_attribute_no_const *panel_handle;
51191
51192 static ssize_t sony_nc_panelid_show(struct device *dev,
51193 struct device_attribute *attr, char *buffer)
51194@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51195 }
51196
51197 /* smart connect function */
51198-static struct device_attribute *sc_handle;
51199+static device_attribute_no_const *sc_handle;
51200
51201 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51202 struct device_attribute *attr,
51203@@ -4854,7 +4854,7 @@ static struct acpi_driver sony_pic_driver = {
51204 .drv.pm = &sony_pic_pm,
51205 };
51206
51207-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
51208+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
51209 {
51210 .ident = "Sony Vaio",
51211 .matches = {
51212diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51213index 3b8ceee..e18652c 100644
51214--- a/drivers/platform/x86/thinkpad_acpi.c
51215+++ b/drivers/platform/x86/thinkpad_acpi.c
51216@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
51217 return 0;
51218 }
51219
51220-void static hotkey_mask_warn_incomplete_mask(void)
51221+static void hotkey_mask_warn_incomplete_mask(void)
51222 {
51223 /* log only what the user can fix... */
51224 const u32 wantedmask = hotkey_driver_mask &
51225@@ -2437,10 +2437,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51226 && !tp_features.bright_unkfw)
51227 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51228 }
51229+}
51230
51231 #undef TPACPI_COMPARE_KEY
51232 #undef TPACPI_MAY_SEND_KEY
51233-}
51234
51235 /*
51236 * Polling driver
51237diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51238index 438d4c7..ca8a2fb 100644
51239--- a/drivers/pnp/pnpbios/bioscalls.c
51240+++ b/drivers/pnp/pnpbios/bioscalls.c
51241@@ -59,7 +59,7 @@ do { \
51242 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51243 } while(0)
51244
51245-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51246+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51247 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51248
51249 /*
51250@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51251
51252 cpu = get_cpu();
51253 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51254+
51255+ pax_open_kernel();
51256 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51257+ pax_close_kernel();
51258
51259 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51260 spin_lock_irqsave(&pnp_bios_lock, flags);
51261@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51262 :"memory");
51263 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51264
51265+ pax_open_kernel();
51266 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51267+ pax_close_kernel();
51268+
51269 put_cpu();
51270
51271 /* If we get here and this is set then the PnP BIOS faulted on us. */
51272@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51273 return status;
51274 }
51275
51276-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51277+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51278 {
51279 int i;
51280
51281@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51282 pnp_bios_callpoint.offset = header->fields.pm16offset;
51283 pnp_bios_callpoint.segment = PNP_CS16;
51284
51285+ pax_open_kernel();
51286+
51287 for_each_possible_cpu(i) {
51288 struct desc_struct *gdt = get_cpu_gdt_table(i);
51289 if (!gdt)
51290@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51291 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51292 (unsigned long)__va(header->fields.pm16dseg));
51293 }
51294+
51295+ pax_close_kernel();
51296 }
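Note on the pax_open_kernel()/pax_close_kernel() pairs added around the GDT updates: under PaX's KERNEXEC and constification, tables like the GDT copies here sit on read-only pages, and the pair briefly lifts write protection for the enclosed stores. Below is a rough userspace model of the toggle using mprotect; this is an assumption-laden sketch only, since the real primitives manipulate kernel page permissions (for example CR0.WP on x86) rather than calling mprotect:

/* sketch: modelling open/close-kernel around a write to read-only data */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define PAGE_SZ 4096 /* assumption: 4 KiB pages */

static char *page;

static void open_kernel(void)  { mprotect(page, PAGE_SZ, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(page, PAGE_SZ, PROT_READ); }

int main(void)
{
        page = mmap(NULL, PAGE_SZ, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
                return 1;
        strcpy(page, "pristine");
        mprotect(page, PAGE_SZ, PROT_READ);     /* data becomes read-only */

        open_kernel();                          /* ~ pax_open_kernel()  */
        strcpy(page, "updated");                /* write now permitted  */
        close_kernel();                         /* ~ pax_close_kernel() */

        printf("%s\n", page);
        return 0;
}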
51297diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
51298index facd43b..b291260 100644
51299--- a/drivers/pnp/pnpbios/core.c
51300+++ b/drivers/pnp/pnpbios/core.c
51301@@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
51302 return 0;
51303 }
51304
51305-static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
51306+static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
51307 { /* PnPBIOS GPF on boot */
51308 .callback = exploding_pnp_bios,
51309 .ident = "Higraded P14H",
51310diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51311index 0c52e2a..3421ab7 100644
51312--- a/drivers/power/pda_power.c
51313+++ b/drivers/power/pda_power.c
51314@@ -37,7 +37,11 @@ static int polling;
51315
51316 #if IS_ENABLED(CONFIG_USB_PHY)
51317 static struct usb_phy *transceiver;
51318-static struct notifier_block otg_nb;
51319+static int otg_handle_notification(struct notifier_block *nb,
51320+ unsigned long event, void *unused);
51321+static struct notifier_block otg_nb = {
51322+ .notifier_call = otg_handle_notification
51323+};
51324 #endif
51325
51326 static struct regulator *ac_draw;
51327@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51328
51329 #if IS_ENABLED(CONFIG_USB_PHY)
51330 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51331- otg_nb.notifier_call = otg_handle_notification;
51332 ret = usb_register_notifier(transceiver, &otg_nb);
51333 if (ret) {
51334 dev_err(dev, "failure to register otg notifier\n");
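Note on the pda_power change above, a pattern that recurs throughout this patch: instead of assigning .notifier_call at probe time, the callback is forward-declared and the notifier_block is initialized with a designated initializer at file scope, so its contents are fixed at build time. A standalone sketch of the idiom, with a stand-in notifier_block definition rather than the kernel one:

/* sketch: static initialization via a forward declaration */
#include <stdio.h>

struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb,
                             unsigned long event, void *data);
};

/* forward declaration lets the object be initialized at file scope */
static int otg_handle_notification(struct notifier_block *nb,
                                   unsigned long event, void *data);

static struct notifier_block otg_nb = {
        .notifier_call = otg_handle_notification
};

static int otg_handle_notification(struct notifier_block *nb,
                                   unsigned long event, void *data)
{
        printf("event %lu\n", event);
        return 0;
}

int main(void)
{
        return otg_nb.notifier_call(&otg_nb, 1UL, NULL);
}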
51335diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51336index cc439fd..8fa30df 100644
51337--- a/drivers/power/power_supply.h
51338+++ b/drivers/power/power_supply.h
51339@@ -16,12 +16,12 @@ struct power_supply;
51340
51341 #ifdef CONFIG_SYSFS
51342
51343-extern void power_supply_init_attrs(struct device_type *dev_type);
51344+extern void power_supply_init_attrs(void);
51345 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51346
51347 #else
51348
51349-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51350+static inline void power_supply_init_attrs(void) {}
51351 #define power_supply_uevent NULL
51352
51353 #endif /* CONFIG_SYSFS */
51354diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51355index 694e8cd..9f03483 100644
51356--- a/drivers/power/power_supply_core.c
51357+++ b/drivers/power/power_supply_core.c
51358@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51359 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51360 EXPORT_SYMBOL_GPL(power_supply_notifier);
51361
51362-static struct device_type power_supply_dev_type;
51363+extern const struct attribute_group *power_supply_attr_groups[];
51364+static struct device_type power_supply_dev_type = {
51365+ .groups = power_supply_attr_groups,
51366+};
51367
51368 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51369 struct power_supply *supply)
51370@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
51371 return PTR_ERR(power_supply_class);
51372
51373 power_supply_class->dev_uevent = power_supply_uevent;
51374- power_supply_init_attrs(&power_supply_dev_type);
51375+ power_supply_init_attrs();
51376
51377 return 0;
51378 }
51379diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51380index 62653f5..d0bb485 100644
51381--- a/drivers/power/power_supply_sysfs.c
51382+++ b/drivers/power/power_supply_sysfs.c
51383@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
51384 .is_visible = power_supply_attr_is_visible,
51385 };
51386
51387-static const struct attribute_group *power_supply_attr_groups[] = {
51388+const struct attribute_group *power_supply_attr_groups[] = {
51389 &power_supply_attr_group,
51390 NULL,
51391 };
51392
51393-void power_supply_init_attrs(struct device_type *dev_type)
51394+void power_supply_init_attrs(void)
51395 {
51396 int i;
51397
51398- dev_type->groups = power_supply_attr_groups;
51399-
51400 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51401 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51402 }
51403diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51404index 84419af..268ede8 100644
51405--- a/drivers/powercap/powercap_sys.c
51406+++ b/drivers/powercap/powercap_sys.c
51407@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51408 struct device_attribute name_attr;
51409 };
51410
51411+static ssize_t show_constraint_name(struct device *dev,
51412+ struct device_attribute *dev_attr,
51413+ char *buf);
51414+
51415 static struct powercap_constraint_attr
51416- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51417+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51418+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51419+ .power_limit_attr = {
51420+ .attr = {
51421+ .name = NULL,
51422+ .mode = S_IWUSR | S_IRUGO
51423+ },
51424+ .show = show_constraint_power_limit_uw,
51425+ .store = store_constraint_power_limit_uw
51426+ },
51427+
51428+ .time_window_attr = {
51429+ .attr = {
51430+ .name = NULL,
51431+ .mode = S_IWUSR | S_IRUGO
51432+ },
51433+ .show = show_constraint_time_window_us,
51434+ .store = store_constraint_time_window_us
51435+ },
51436+
51437+ .max_power_attr = {
51438+ .attr = {
51439+ .name = NULL,
51440+ .mode = S_IRUGO
51441+ },
51442+ .show = show_constraint_max_power_uw,
51443+ .store = NULL
51444+ },
51445+
51446+ .min_power_attr = {
51447+ .attr = {
51448+ .name = NULL,
51449+ .mode = S_IRUGO
51450+ },
51451+ .show = show_constraint_min_power_uw,
51452+ .store = NULL
51453+ },
51454+
51455+ .max_time_window_attr = {
51456+ .attr = {
51457+ .name = NULL,
51458+ .mode = S_IRUGO
51459+ },
51460+ .show = show_constraint_max_time_window_us,
51461+ .store = NULL
51462+ },
51463+
51464+ .min_time_window_attr = {
51465+ .attr = {
51466+ .name = NULL,
51467+ .mode = S_IRUGO
51468+ },
51469+ .show = show_constraint_min_time_window_us,
51470+ .store = NULL
51471+ },
51472+
51473+ .name_attr = {
51474+ .attr = {
51475+ .name = NULL,
51476+ .mode = S_IRUGO
51477+ },
51478+ .show = show_constraint_name,
51479+ .store = NULL
51480+ }
51481+ }
51482+};
51483
51484 /* A list of powercap control_types */
51485 static LIST_HEAD(powercap_cntrl_list);
51486@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51487 }
51488
51489 static int create_constraint_attribute(int id, const char *name,
51490- int mode,
51491- struct device_attribute *dev_attr,
51492- ssize_t (*show)(struct device *,
51493- struct device_attribute *, char *),
51494- ssize_t (*store)(struct device *,
51495- struct device_attribute *,
51496- const char *, size_t)
51497- )
51498+ struct device_attribute *dev_attr)
51499 {
51500+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51501
51502- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51503- id, name);
51504- if (!dev_attr->attr.name)
51505+ if (!name)
51506 return -ENOMEM;
51507- dev_attr->attr.mode = mode;
51508- dev_attr->show = show;
51509- dev_attr->store = store;
51510+
51511+ pax_open_kernel();
51512+ *(const char **)&dev_attr->attr.name = name;
51513+ pax_close_kernel();
51514
51515 return 0;
51516 }
51517@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51518
51519 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51520 ret = create_constraint_attribute(i, "power_limit_uw",
51521- S_IWUSR | S_IRUGO,
51522- &constraint_attrs[i].power_limit_attr,
51523- show_constraint_power_limit_uw,
51524- store_constraint_power_limit_uw);
51525+ &constraint_attrs[i].power_limit_attr);
51526 if (ret)
51527 goto err_alloc;
51528 ret = create_constraint_attribute(i, "time_window_us",
51529- S_IWUSR | S_IRUGO,
51530- &constraint_attrs[i].time_window_attr,
51531- show_constraint_time_window_us,
51532- store_constraint_time_window_us);
51533+ &constraint_attrs[i].time_window_attr);
51534 if (ret)
51535 goto err_alloc;
51536- ret = create_constraint_attribute(i, "name", S_IRUGO,
51537- &constraint_attrs[i].name_attr,
51538- show_constraint_name,
51539- NULL);
51540+ ret = create_constraint_attribute(i, "name",
51541+ &constraint_attrs[i].name_attr);
51542 if (ret)
51543 goto err_alloc;
51544- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51545- &constraint_attrs[i].max_power_attr,
51546- show_constraint_max_power_uw,
51547- NULL);
51548+ ret = create_constraint_attribute(i, "max_power_uw",
51549+ &constraint_attrs[i].max_power_attr);
51550 if (ret)
51551 goto err_alloc;
51552- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51553- &constraint_attrs[i].min_power_attr,
51554- show_constraint_min_power_uw,
51555- NULL);
51556+ ret = create_constraint_attribute(i, "min_power_uw",
51557+ &constraint_attrs[i].min_power_attr);
51558 if (ret)
51559 goto err_alloc;
51560 ret = create_constraint_attribute(i, "max_time_window_us",
51561- S_IRUGO,
51562- &constraint_attrs[i].max_time_window_attr,
51563- show_constraint_max_time_window_us,
51564- NULL);
51565+ &constraint_attrs[i].max_time_window_attr);
51566 if (ret)
51567 goto err_alloc;
51568 ret = create_constraint_attribute(i, "min_time_window_us",
51569- S_IRUGO,
51570- &constraint_attrs[i].min_time_window_attr,
51571- show_constraint_min_time_window_us,
51572- NULL);
51573+ &constraint_attrs[i].min_time_window_attr);
51574 if (ret)
51575 goto err_alloc;
51576
51577@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51578 power_zone->zone_dev_attrs[count++] =
51579 &dev_attr_max_energy_range_uj.attr;
51580 if (power_zone->ops->get_energy_uj) {
51581+ pax_open_kernel();
51582 if (power_zone->ops->reset_energy_uj)
51583- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51584+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51585 else
51586- dev_attr_energy_uj.attr.mode = S_IRUGO;
51587+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51588+ pax_close_kernel();
51589 power_zone->zone_dev_attrs[count++] =
51590 &dev_attr_energy_uj.attr;
51591 }
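Note on the powercap rewrite above: it moves almost all of the constraint attribute setup out of seed_constraint_attributes() into a compile-time table, using the GCC/Clang range designator [first ... last] so every array element receives identical static contents; only the kasprintf'd name remains a runtime write, done under pax_open_kernel(). A small standalone example of the range-designator extension (names and sizes are illustrative only):

/* sketch: GCC/Clang range designated initializers */
#include <stdio.h>

#define MAX_CONSTRAINTS_PER_ZONE 8

struct attr {
        const char *name;   /* filled in later, as in the patch */
        unsigned int mode;
};

static struct attr constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
        [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
                .name = NULL,
                .mode = 0644,
        },
};

int main(void)
{
        printf("mode of slot 5: %o\n", constraint_attrs[5].mode);
        return 0;
}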
51592diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51593index 9c5d414..c7900ce 100644
51594--- a/drivers/ptp/ptp_private.h
51595+++ b/drivers/ptp/ptp_private.h
51596@@ -51,7 +51,7 @@ struct ptp_clock {
51597 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51598 wait_queue_head_t tsev_wq;
51599 int defunct; /* tells readers to go away when clock is being removed */
51600- struct device_attribute *pin_dev_attr;
51601+ device_attribute_no_const *pin_dev_attr;
51602 struct attribute **pin_attr;
51603 struct attribute_group pin_attr_group;
51604 };
51605diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51606index 302e626..12579af 100644
51607--- a/drivers/ptp/ptp_sysfs.c
51608+++ b/drivers/ptp/ptp_sysfs.c
51609@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51610 goto no_pin_attr;
51611
51612 for (i = 0; i < n_pins; i++) {
51613- struct device_attribute *da = &ptp->pin_dev_attr[i];
51614+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51615 sysfs_attr_init(&da->attr);
51616 da->attr.name = info->pin_config[i].name;
51617 da->attr.mode = 0644;
51618diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51619index a4a8a6d..a3456f4 100644
51620--- a/drivers/regulator/core.c
51621+++ b/drivers/regulator/core.c
51622@@ -3529,7 +3529,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51623 const struct regulation_constraints *constraints = NULL;
51624 const struct regulator_init_data *init_data;
51625 struct regulator_config *config = NULL;
51626- static atomic_t regulator_no = ATOMIC_INIT(-1);
51627+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
51628 struct regulator_dev *rdev;
51629 struct device *dev;
51630 int ret, i;
51631@@ -3613,7 +3613,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51632 rdev->dev.class = &regulator_class;
51633 rdev->dev.parent = dev;
51634 dev_set_name(&rdev->dev, "regulator.%lu",
51635- (unsigned long) atomic_inc_return(&regulator_no));
51636+ (unsigned long) atomic_inc_return_unchecked(&regulator_no));
51637 ret = device_register(&rdev->dev);
51638 if (ret != 0) {
51639 put_device(&rdev->dev);
51640diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51641index 7eee2ca..4024513 100644
51642--- a/drivers/regulator/max8660.c
51643+++ b/drivers/regulator/max8660.c
51644@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
51645 max8660->shadow_regs[MAX8660_OVER1] = 5;
51646 } else {
51647 /* Otherwise devices can be toggled via software */
51648- max8660_dcdc_ops.enable = max8660_dcdc_enable;
51649- max8660_dcdc_ops.disable = max8660_dcdc_disable;
51650+ pax_open_kernel();
51651+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51652+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51653+ pax_close_kernel();
51654 }
51655
51656 /*
51657diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51658index c3d55c2..0dddfe6 100644
51659--- a/drivers/regulator/max8973-regulator.c
51660+++ b/drivers/regulator/max8973-regulator.c
51661@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51662 if (!pdata || !pdata->enable_ext_control) {
51663 max->desc.enable_reg = MAX8973_VOUT;
51664 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51665- max->ops.enable = regulator_enable_regmap;
51666- max->ops.disable = regulator_disable_regmap;
51667- max->ops.is_enabled = regulator_is_enabled_regmap;
51668+ pax_open_kernel();
51669+ *(void **)&max->ops.enable = regulator_enable_regmap;
51670+ *(void **)&max->ops.disable = regulator_disable_regmap;
51671+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51672+ pax_close_kernel();
51673 }
51674
51675 if (pdata) {
51676diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51677index 0d17c92..a29f627 100644
51678--- a/drivers/regulator/mc13892-regulator.c
51679+++ b/drivers/regulator/mc13892-regulator.c
51680@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51681 mc13xxx_unlock(mc13892);
51682
51683 /* update mc13892_vcam ops */
51684- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51685+ pax_open_kernel();
51686+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51687 sizeof(struct regulator_ops));
51688- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51689- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51690+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51691+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51692+ pax_close_kernel();
51693 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
51694
51695 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
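Note on the max8660, max8973 and mc13892 hunks above, which share one pattern: a member of an ops structure is redirected at probe time, and once such structures are constified the store needs an explicit cast (to get past the compiler) inside a pax_open_kernel() window (to get past the page protection). Below is a standalone model of the cast half only; here the object is ordinary writable data, unlike the pax-protected kernel case, and the idiom assumes function and object pointers share a representation, as they do on Linux targets:

/* sketch: patching one slot of an ops table that is published as const */
#include <stdio.h>

struct regulator_ops {
        int (*enable)(void);
};

static int dcdc_enable(void) { puts("enable"); return 0; }

static struct regulator_ops max8660_dcdc_ops;                  /* writable here */
static const struct regulator_ops *public_ops = &max8660_dcdc_ops;

int main(void)
{
        /* the patch's idiom: *(void **)&ops.member = handler; */
        *(void **)&max8660_dcdc_ops.enable = (void *)dcdc_enable;
        return public_ops->enable();
}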
51696diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51697index 5b2e761..c8c8a4a 100644
51698--- a/drivers/rtc/rtc-cmos.c
51699+++ b/drivers/rtc/rtc-cmos.c
51700@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51701 hpet_rtc_timer_init();
51702
51703 /* export at least the first block of NVRAM */
51704- nvram.size = address_space - NVRAM_OFFSET;
51705+ pax_open_kernel();
51706+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51707+ pax_close_kernel();
51708 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51709 if (retval < 0) {
51710 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51711diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51712index 799c34b..8e9786a 100644
51713--- a/drivers/rtc/rtc-dev.c
51714+++ b/drivers/rtc/rtc-dev.c
51715@@ -16,6 +16,7 @@
51716 #include <linux/module.h>
51717 #include <linux/rtc.h>
51718 #include <linux/sched.h>
51719+#include <linux/grsecurity.h>
51720 #include "rtc-core.h"
51721
51722 static dev_t rtc_devt;
51723@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51724 if (copy_from_user(&tm, uarg, sizeof(tm)))
51725 return -EFAULT;
51726
51727+ gr_log_timechange();
51728+
51729 return rtc_set_time(rtc, &tm);
51730
51731 case RTC_PIE_ON:
51732diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51733index 4ffabb3..1f87fca 100644
51734--- a/drivers/rtc/rtc-ds1307.c
51735+++ b/drivers/rtc/rtc-ds1307.c
51736@@ -107,7 +107,7 @@ struct ds1307 {
51737 u8 offset; /* register's offset */
51738 u8 regs[11];
51739 u16 nvram_offset;
51740- struct bin_attribute *nvram;
51741+ bin_attribute_no_const *nvram;
51742 enum ds_type type;
51743 unsigned long flags;
51744 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51745diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51746index 90abb5b..e0bf6dd 100644
51747--- a/drivers/rtc/rtc-m48t59.c
51748+++ b/drivers/rtc/rtc-m48t59.c
51749@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
51750 if (IS_ERR(m48t59->rtc))
51751 return PTR_ERR(m48t59->rtc);
51752
51753- m48t59_nvram_attr.size = pdata->offset;
51754+ pax_open_kernel();
51755+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
51756+ pax_close_kernel();
51757
51758 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
51759 if (ret)
51760diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
51761index e693af6..2e525b6 100644
51762--- a/drivers/scsi/bfa/bfa_fcpim.h
51763+++ b/drivers/scsi/bfa/bfa_fcpim.h
51764@@ -36,7 +36,7 @@ struct bfa_iotag_s {
51765
51766 struct bfa_itn_s {
51767 bfa_isr_func_t isr;
51768-};
51769+} __no_const;
51770
51771 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
51772 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
51773diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
51774index 0f19455..ef7adb5 100644
51775--- a/drivers/scsi/bfa/bfa_fcs.c
51776+++ b/drivers/scsi/bfa/bfa_fcs.c
51777@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
51778 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
51779
51780 static struct bfa_fcs_mod_s fcs_modules[] = {
51781- { bfa_fcs_port_attach, NULL, NULL },
51782- { bfa_fcs_uf_attach, NULL, NULL },
51783- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
51784- bfa_fcs_fabric_modexit },
51785+ {
51786+ .attach = bfa_fcs_port_attach,
51787+ .modinit = NULL,
51788+ .modexit = NULL
51789+ },
51790+ {
51791+ .attach = bfa_fcs_uf_attach,
51792+ .modinit = NULL,
51793+ .modexit = NULL
51794+ },
51795+ {
51796+ .attach = bfa_fcs_fabric_attach,
51797+ .modinit = bfa_fcs_fabric_modinit,
51798+ .modexit = bfa_fcs_fabric_modexit
51799+ },
51800 };
51801
51802 /*
51803diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
51804index ff75ef8..2dfe00a 100644
51805--- a/drivers/scsi/bfa/bfa_fcs_lport.c
51806+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
51807@@ -89,15 +89,26 @@ static struct {
51808 void (*offline) (struct bfa_fcs_lport_s *port);
51809 } __port_action[] = {
51810 {
51811- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
51812- bfa_fcs_lport_unknown_offline}, {
51813- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
51814- bfa_fcs_lport_fab_offline}, {
51815- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
51816- bfa_fcs_lport_n2n_offline}, {
51817- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
51818- bfa_fcs_lport_loop_offline},
51819- };
51820+ .init = bfa_fcs_lport_unknown_init,
51821+ .online = bfa_fcs_lport_unknown_online,
51822+ .offline = bfa_fcs_lport_unknown_offline
51823+ },
51824+ {
51825+ .init = bfa_fcs_lport_fab_init,
51826+ .online = bfa_fcs_lport_fab_online,
51827+ .offline = bfa_fcs_lport_fab_offline
51828+ },
51829+ {
51830+ .init = bfa_fcs_lport_n2n_init,
51831+ .online = bfa_fcs_lport_n2n_online,
51832+ .offline = bfa_fcs_lport_n2n_offline
51833+ },
51834+ {
51835+ .init = bfa_fcs_lport_loop_init,
51836+ .online = bfa_fcs_lport_loop_online,
51837+ .offline = bfa_fcs_lport_loop_offline
51838+ },
51839+};
51840
51841 /*
51842 * fcs_port_sm FCS logical port state machine
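Note on the fcs_modules and __port_action rewrites above: they are readability and robustness changes in which positional initializers are replaced by designated ones, so each callback is tied to its field name and reordering the struct can no longer silently mis-wire the table. A compact before/after sketch with hypothetical names:

/* sketch: positional vs designated initialization of a callback table */
#include <stdio.h>

struct port_action {
        void (*init)(void);
        void (*online)(void);
        void (*offline)(void);
};

static void fab_init(void)    { puts("init");    }
static void fab_online(void)  { puts("online");  }
static void fab_offline(void) { puts("offline"); }

/* positional: correctness depends on field order */
static struct port_action old_style = { fab_init, fab_online, fab_offline };

/* designated: each callback is named explicitly */
static struct port_action new_style = {
        .init    = fab_init,
        .online  = fab_online,
        .offline = fab_offline,
};

int main(void)
{
        old_style.init();
        new_style.offline();
        return 0;
}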
51843diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
51844index a38aafa0..fe8f03b 100644
51845--- a/drivers/scsi/bfa/bfa_ioc.h
51846+++ b/drivers/scsi/bfa/bfa_ioc.h
51847@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
51848 bfa_ioc_disable_cbfn_t disable_cbfn;
51849 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
51850 bfa_ioc_reset_cbfn_t reset_cbfn;
51851-};
51852+} __no_const;
51853
51854 /*
51855 * IOC event notification mechanism.
51856@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
51857 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
51858 enum bfi_ioc_state fwstate);
51859 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
51860-};
51861+} __no_const;
51862
51863 /*
51864 * Queue element to wait for room in request queue. FIFO order is
51865diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
51866index a14c784..6de6790 100644
51867--- a/drivers/scsi/bfa/bfa_modules.h
51868+++ b/drivers/scsi/bfa/bfa_modules.h
51869@@ -78,12 +78,12 @@ enum {
51870 \
51871 extern struct bfa_module_s hal_mod_ ## __mod; \
51872 struct bfa_module_s hal_mod_ ## __mod = { \
51873- bfa_ ## __mod ## _meminfo, \
51874- bfa_ ## __mod ## _attach, \
51875- bfa_ ## __mod ## _detach, \
51876- bfa_ ## __mod ## _start, \
51877- bfa_ ## __mod ## _stop, \
51878- bfa_ ## __mod ## _iocdisable, \
51879+ .meminfo = bfa_ ## __mod ## _meminfo, \
51880+ .attach = bfa_ ## __mod ## _attach, \
51881+ .detach = bfa_ ## __mod ## _detach, \
51882+ .start = bfa_ ## __mod ## _start, \
51883+ .stop = bfa_ ## __mod ## _stop, \
51884+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51885 }
51886
51887 #define BFA_CACHELINE_SZ (256)
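Note on the bfa_modules.h hunk above, which applies the same conversion inside a code-generating macro: the ## paste still builds the per-module function names, but each now lands on a named field. A standalone sketch of the idiom with a hypothetical module and a reduced struct:

/* sketch: token pasting combined with designated initializers */
#include <stdio.h>

struct bfa_module {
        void (*attach)(void);
        void (*detach)(void);
};

#define BFA_MODULE(mod)                         \
        struct bfa_module hal_mod_##mod = {     \
                .attach = mod##_attach,         \
                .detach = mod##_detach,         \
        }

static void fcp_attach(void) { puts("fcp attach"); }
static void fcp_detach(void) { puts("fcp detach"); }

static BFA_MODULE(fcp);

int main(void)
{
        hal_mod_fcp.attach();
        hal_mod_fcp.detach();
        return 0;
}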
51888diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51889index 045c4e1..13de803 100644
51890--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51891+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51892@@ -33,8 +33,8 @@
51893 */
51894 #include "libfcoe.h"
51895
51896-static atomic_t ctlr_num;
51897-static atomic_t fcf_num;
51898+static atomic_unchecked_t ctlr_num;
51899+static atomic_unchecked_t fcf_num;
51900
51901 /*
51902 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51903@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51904 if (!ctlr)
51905 goto out;
51906
51907- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51908+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51909 ctlr->f = f;
51910 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51911 INIT_LIST_HEAD(&ctlr->fcfs);
51912@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51913 fcf->dev.parent = &ctlr->dev;
51914 fcf->dev.bus = &fcoe_bus_type;
51915 fcf->dev.type = &fcoe_fcf_device_type;
51916- fcf->id = atomic_inc_return(&fcf_num) - 1;
51917+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51918 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51919
51920 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51921@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51922 {
51923 int error;
51924
51925- atomic_set(&ctlr_num, 0);
51926- atomic_set(&fcf_num, 0);
51927+ atomic_set_unchecked(&ctlr_num, 0);
51928+ atomic_set_unchecked(&fcf_num, 0);
51929
51930 error = bus_register(&fcoe_bus_type);
51931 if (error)
51932diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
51933index 8bb173e..20236b4 100644
51934--- a/drivers/scsi/hosts.c
51935+++ b/drivers/scsi/hosts.c
51936@@ -42,7 +42,7 @@
51937 #include "scsi_logging.h"
51938
51939
51940-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51941+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51942
51943
51944 static void scsi_host_cls_release(struct device *dev)
51945@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
51946 * subtract one because we increment first then return, but we need to
51947 * know what the next host number was before increment
51948 */
51949- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
51950+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
51951 shost->dma_channel = 0xff;
51952
51953 /* These three are default values which can be overridden */
51954diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
51955index a1cfbd3..d7f8ebc 100644
51956--- a/drivers/scsi/hpsa.c
51957+++ b/drivers/scsi/hpsa.c
51958@@ -697,10 +697,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
51959 struct reply_queue_buffer *rq = &h->reply_queue[q];
51960
51961 if (h->transMethod & CFGTBL_Trans_io_accel1)
51962- return h->access.command_completed(h, q);
51963+ return h->access->command_completed(h, q);
51964
51965 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
51966- return h->access.command_completed(h, q);
51967+ return h->access->command_completed(h, q);
51968
51969 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
51970 a = rq->head[rq->current_entry];
51971@@ -837,7 +837,7 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
51972 break;
51973 default:
51974 set_performant_mode(h, c);
51975- h->access.submit_command(h, c);
51976+ h->access->submit_command(h, c);
51977 }
51978 }
51979
51980@@ -5369,17 +5369,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
51981
51982 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
51983 {
51984- return h->access.command_completed(h, q);
51985+ return h->access->command_completed(h, q);
51986 }
51987
51988 static inline bool interrupt_pending(struct ctlr_info *h)
51989 {
51990- return h->access.intr_pending(h);
51991+ return h->access->intr_pending(h);
51992 }
51993
51994 static inline long interrupt_not_for_us(struct ctlr_info *h)
51995 {
51996- return (h->access.intr_pending(h) == 0) ||
51997+ return (h->access->intr_pending(h) == 0) ||
51998 (h->interrupts_enabled == 0);
51999 }
52000
52001@@ -6270,7 +6270,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52002 if (prod_index < 0)
52003 return prod_index;
52004 h->product_name = products[prod_index].product_name;
52005- h->access = *(products[prod_index].access);
52006+ h->access = products[prod_index].access;
52007
52008 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52009 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52010@@ -6649,7 +6649,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52011 unsigned long flags;
52012 u32 lockup_detected;
52013
52014- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52015+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52016 spin_lock_irqsave(&h->lock, flags);
52017 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52018 if (!lockup_detected) {
52019@@ -6924,7 +6924,7 @@ reinit_after_soft_reset:
52020 }
52021
52022 /* make sure the board interrupts are off */
52023- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52024+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52025
52026 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52027 goto clean2;
52028@@ -6960,7 +6960,7 @@ reinit_after_soft_reset:
52029 * fake ones to scoop up any residual completions.
52030 */
52031 spin_lock_irqsave(&h->lock, flags);
52032- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52033+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52034 spin_unlock_irqrestore(&h->lock, flags);
52035 hpsa_free_irqs(h);
52036 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
52037@@ -6979,9 +6979,9 @@ reinit_after_soft_reset:
52038 dev_info(&h->pdev->dev, "Board READY.\n");
52039 dev_info(&h->pdev->dev,
52040 "Waiting for stale completions to drain.\n");
52041- h->access.set_intr_mask(h, HPSA_INTR_ON);
52042+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52043 msleep(10000);
52044- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52045+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52046
52047 rc = controller_reset_failed(h->cfgtable);
52048 if (rc)
52049@@ -7006,7 +7006,7 @@ reinit_after_soft_reset:
52050
52051
52052 /* Turn the interrupts on so we can service requests */
52053- h->access.set_intr_mask(h, HPSA_INTR_ON);
52054+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52055
52056 hpsa_hba_inquiry(h);
52057 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52058@@ -7079,7 +7079,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52059 * To write all data in the battery backed cache to disks
52060 */
52061 hpsa_flush_cache(h);
52062- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52063+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52064 hpsa_free_irqs_and_disable_msix(h);
52065 }
52066
52067@@ -7200,7 +7200,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52068 CFGTBL_Trans_enable_directed_msix |
52069 (trans_support & (CFGTBL_Trans_io_accel1 |
52070 CFGTBL_Trans_io_accel2));
52071- struct access_method access = SA5_performant_access;
52072+ struct access_method *access = &SA5_performant_access;
52073
52074 /* This is a bit complicated. There are 8 registers on
52075 * the controller which we write to to tell it 8 different
52076@@ -7242,7 +7242,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52077 * perform the superfluous readl() after each command submission.
52078 */
52079 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52080- access = SA5_performant_access_no_read;
52081+ access = &SA5_performant_access_no_read;
52082
52083 /* Controller spec: zero out this buffer. */
52084 for (i = 0; i < h->nreply_queues; i++)
52085@@ -7272,12 +7272,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52086 * enable outbound interrupt coalescing in accelerator mode;
52087 */
52088 if (trans_support & CFGTBL_Trans_io_accel1) {
52089- access = SA5_ioaccel_mode1_access;
52090+ access = &SA5_ioaccel_mode1_access;
52091 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52092 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52093 } else {
52094 if (trans_support & CFGTBL_Trans_io_accel2) {
52095- access = SA5_ioaccel_mode2_access;
52096+ access = &SA5_ioaccel_mode2_access;
52097 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52098 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52099 }
52100diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52101index 6577130..955f9a4 100644
52102--- a/drivers/scsi/hpsa.h
52103+++ b/drivers/scsi/hpsa.h
52104@@ -143,7 +143,7 @@ struct ctlr_info {
52105 unsigned int msix_vector;
52106 unsigned int msi_vector;
52107 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52108- struct access_method access;
52109+ struct access_method *access;
52110 char hba_mode_enabled;
52111
52112 /* queue and queue Info */
52113@@ -525,38 +525,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52114 }
52115
52116 static struct access_method SA5_access = {
52117- SA5_submit_command,
52118- SA5_intr_mask,
52119- SA5_intr_pending,
52120- SA5_completed,
52121+ .submit_command = SA5_submit_command,
52122+ .set_intr_mask = SA5_intr_mask,
52123+ .intr_pending = SA5_intr_pending,
52124+ .command_completed = SA5_completed,
52125 };
52126
52127 static struct access_method SA5_ioaccel_mode1_access = {
52128- SA5_submit_command,
52129- SA5_performant_intr_mask,
52130- SA5_ioaccel_mode1_intr_pending,
52131- SA5_ioaccel_mode1_completed,
52132+ .submit_command = SA5_submit_command,
52133+ .set_intr_mask = SA5_performant_intr_mask,
52134+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52135+ .command_completed = SA5_ioaccel_mode1_completed,
52136 };
52137
52138 static struct access_method SA5_ioaccel_mode2_access = {
52139- SA5_submit_command_ioaccel2,
52140- SA5_performant_intr_mask,
52141- SA5_performant_intr_pending,
52142- SA5_performant_completed,
52143+ .submit_command = SA5_submit_command_ioaccel2,
52144+ .set_intr_mask = SA5_performant_intr_mask,
52145+ .intr_pending = SA5_performant_intr_pending,
52146+ .command_completed = SA5_performant_completed,
52147 };
52148
52149 static struct access_method SA5_performant_access = {
52150- SA5_submit_command,
52151- SA5_performant_intr_mask,
52152- SA5_performant_intr_pending,
52153- SA5_performant_completed,
52154+ .submit_command = SA5_submit_command,
52155+ .set_intr_mask = SA5_performant_intr_mask,
52156+ .intr_pending = SA5_performant_intr_pending,
52157+ .command_completed = SA5_performant_completed,
52158 };
52159
52160 static struct access_method SA5_performant_access_no_read = {
52161- SA5_submit_command_no_read,
52162- SA5_performant_intr_mask,
52163- SA5_performant_intr_pending,
52164- SA5_performant_completed,
52165+ .submit_command = SA5_submit_command_no_read,
52166+ .set_intr_mask = SA5_performant_intr_mask,
52167+ .intr_pending = SA5_performant_intr_pending,
52168+ .command_completed = SA5_performant_completed,
52169 };
52170
52171 struct board_type {
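Note on the hpsa changes above: the embedded copy of access_method in ctlr_info is swapped for a pointer to one of the shared static tables, so every h->access.fn() becomes h->access->fn(), the per-controller duplication goes away, and the shared tables become candidates for read-only placement under constification. A reduced sketch of the two layouts, with only one callback kept:

/* sketch: per-object copy of an ops struct vs pointer to a shared table */
#include <stdio.h>

struct access_method {
        void (*submit_command)(void);
};

static void sa5_submit(void) { puts("submit"); }

static const struct access_method SA5_access = {
        .submit_command = sa5_submit,
};

struct ctlr_info_old { struct access_method access; };        /* copy   */
struct ctlr_info_new { const struct access_method *access; }; /* shared */

int main(void)
{
        struct ctlr_info_old o = { .access = SA5_access };
        struct ctlr_info_new n = { .access = &SA5_access };

        o.access.submit_command();   /* old: h->access.fn()  */
        n.access->submit_command();  /* new: h->access->fn() */
        return 0;
}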
52172diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52173index 1b3a094..068e683 100644
52174--- a/drivers/scsi/libfc/fc_exch.c
52175+++ b/drivers/scsi/libfc/fc_exch.c
52176@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52177 u16 pool_max_index;
52178
52179 struct {
52180- atomic_t no_free_exch;
52181- atomic_t no_free_exch_xid;
52182- atomic_t xid_not_found;
52183- atomic_t xid_busy;
52184- atomic_t seq_not_found;
52185- atomic_t non_bls_resp;
52186+ atomic_unchecked_t no_free_exch;
52187+ atomic_unchecked_t no_free_exch_xid;
52188+ atomic_unchecked_t xid_not_found;
52189+ atomic_unchecked_t xid_busy;
52190+ atomic_unchecked_t seq_not_found;
52191+ atomic_unchecked_t non_bls_resp;
52192 } stats;
52193 };
52194
52195@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52196 /* allocate memory for exchange */
52197 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52198 if (!ep) {
52199- atomic_inc(&mp->stats.no_free_exch);
52200+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52201 goto out;
52202 }
52203 memset(ep, 0, sizeof(*ep));
52204@@ -874,7 +874,7 @@ out:
52205 return ep;
52206 err:
52207 spin_unlock_bh(&pool->lock);
52208- atomic_inc(&mp->stats.no_free_exch_xid);
52209+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52210 mempool_free(ep, mp->ep_pool);
52211 return NULL;
52212 }
52213@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52214 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52215 ep = fc_exch_find(mp, xid);
52216 if (!ep) {
52217- atomic_inc(&mp->stats.xid_not_found);
52218+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52219 reject = FC_RJT_OX_ID;
52220 goto out;
52221 }
52222@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52223 ep = fc_exch_find(mp, xid);
52224 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52225 if (ep) {
52226- atomic_inc(&mp->stats.xid_busy);
52227+ atomic_inc_unchecked(&mp->stats.xid_busy);
52228 reject = FC_RJT_RX_ID;
52229 goto rel;
52230 }
52231@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52232 }
52233 xid = ep->xid; /* get our XID */
52234 } else if (!ep) {
52235- atomic_inc(&mp->stats.xid_not_found);
52236+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52237 reject = FC_RJT_RX_ID; /* XID not found */
52238 goto out;
52239 }
52240@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52241 } else {
52242 sp = &ep->seq;
52243 if (sp->id != fh->fh_seq_id) {
52244- atomic_inc(&mp->stats.seq_not_found);
52245+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52246 if (f_ctl & FC_FC_END_SEQ) {
52247 /*
52248 * Update sequence_id based on incoming last
52249@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52250
52251 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52252 if (!ep) {
52253- atomic_inc(&mp->stats.xid_not_found);
52254+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52255 goto out;
52256 }
52257 if (ep->esb_stat & ESB_ST_COMPLETE) {
52258- atomic_inc(&mp->stats.xid_not_found);
52259+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52260 goto rel;
52261 }
52262 if (ep->rxid == FC_XID_UNKNOWN)
52263 ep->rxid = ntohs(fh->fh_rx_id);
52264 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52265- atomic_inc(&mp->stats.xid_not_found);
52266+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52267 goto rel;
52268 }
52269 if (ep->did != ntoh24(fh->fh_s_id) &&
52270 ep->did != FC_FID_FLOGI) {
52271- atomic_inc(&mp->stats.xid_not_found);
52272+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52273 goto rel;
52274 }
52275 sof = fr_sof(fp);
52276@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52277 sp->ssb_stat |= SSB_ST_RESP;
52278 sp->id = fh->fh_seq_id;
52279 } else if (sp->id != fh->fh_seq_id) {
52280- atomic_inc(&mp->stats.seq_not_found);
52281+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52282 goto rel;
52283 }
52284
52285@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52286 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52287
52288 if (!sp)
52289- atomic_inc(&mp->stats.xid_not_found);
52290+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52291 else
52292- atomic_inc(&mp->stats.non_bls_resp);
52293+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52294
52295 fc_frame_free(fp);
52296 }
52297@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52298
52299 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52300 mp = ema->mp;
52301- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52302+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52303 st->fc_no_free_exch_xid +=
52304- atomic_read(&mp->stats.no_free_exch_xid);
52305- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52306- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52307- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52308- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52309+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52310+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52311+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52312+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52313+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52314 }
52315 }
52316 EXPORT_SYMBOL(fc_exch_update_stats);
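Note on the atomic_t to atomic_unchecked_t churn in fc_exch.c and the surrounding SCSI drivers: it relates to PaX's REFCOUNT hardening, under which atomic_t operations trap on overflow; counters where wraparound is harmless, such as statistics and cosmetic IDs, are moved to the unchecked variant to avoid false positives. A C11 model of such a wrap-tolerant statistics counter, with stdatomic standing in for the kernel API:

/* sketch: a statistics counter where wraparound is acceptable */
#include <stdatomic.h>
#include <stdio.h>

/* kernel: atomic_unchecked_t xid_not_found; */
static atomic_uint xid_not_found;

static void record_miss(void)
{
        /* kernel: atomic_inc_unchecked(&mp->stats.xid_not_found);
         * unsigned overflow simply wraps, which is fine for a stat */
        atomic_fetch_add_explicit(&xid_not_found, 1, memory_order_relaxed);
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                record_miss();
        printf("misses: %u\n",
               atomic_load_explicit(&xid_not_found, memory_order_relaxed));
        return 0;
}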
52317diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52318index 9c706d8..d3e3ed2 100644
52319--- a/drivers/scsi/libsas/sas_ata.c
52320+++ b/drivers/scsi/libsas/sas_ata.c
52321@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
52322 .postreset = ata_std_postreset,
52323 .error_handler = ata_std_error_handler,
52324 .post_internal_cmd = sas_ata_post_internal,
52325- .qc_defer = ata_std_qc_defer,
52326+ .qc_defer = ata_std_qc_defer,
52327 .qc_prep = ata_noop_qc_prep,
52328 .qc_issue = sas_ata_qc_issue,
52329 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52330diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52331index 434e903..5a4a79b 100644
52332--- a/drivers/scsi/lpfc/lpfc.h
52333+++ b/drivers/scsi/lpfc/lpfc.h
52334@@ -430,7 +430,7 @@ struct lpfc_vport {
52335 struct dentry *debug_nodelist;
52336 struct dentry *vport_debugfs_root;
52337 struct lpfc_debugfs_trc *disc_trc;
52338- atomic_t disc_trc_cnt;
52339+ atomic_unchecked_t disc_trc_cnt;
52340 #endif
52341 uint8_t stat_data_enabled;
52342 uint8_t stat_data_blocked;
52343@@ -880,8 +880,8 @@ struct lpfc_hba {
52344 struct timer_list fabric_block_timer;
52345 unsigned long bit_flags;
52346 #define FABRIC_COMANDS_BLOCKED 0
52347- atomic_t num_rsrc_err;
52348- atomic_t num_cmd_success;
52349+ atomic_unchecked_t num_rsrc_err;
52350+ atomic_unchecked_t num_cmd_success;
52351 unsigned long last_rsrc_error_time;
52352 unsigned long last_ramp_down_time;
52353 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52354@@ -916,7 +916,7 @@ struct lpfc_hba {
52355
52356 struct dentry *debug_slow_ring_trc;
52357 struct lpfc_debugfs_trc *slow_ring_trc;
52358- atomic_t slow_ring_trc_cnt;
52359+ atomic_unchecked_t slow_ring_trc_cnt;
52360 /* iDiag debugfs sub-directory */
52361 struct dentry *idiag_root;
52362 struct dentry *idiag_pci_cfg;
52363diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52364index 5633e7d..8272114 100644
52365--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52366+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52367@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52368
52369 #include <linux/debugfs.h>
52370
52371-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52372+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52373 static unsigned long lpfc_debugfs_start_time = 0L;
52374
52375 /* iDiag */
52376@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52377 lpfc_debugfs_enable = 0;
52378
52379 len = 0;
52380- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52381+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52382 (lpfc_debugfs_max_disc_trc - 1);
52383 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52384 dtp = vport->disc_trc + i;
52385@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52386 lpfc_debugfs_enable = 0;
52387
52388 len = 0;
52389- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52390+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52391 (lpfc_debugfs_max_slow_ring_trc - 1);
52392 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52393 dtp = phba->slow_ring_trc + i;
52394@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52395 !vport || !vport->disc_trc)
52396 return;
52397
52398- index = atomic_inc_return(&vport->disc_trc_cnt) &
52399+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52400 (lpfc_debugfs_max_disc_trc - 1);
52401 dtp = vport->disc_trc + index;
52402 dtp->fmt = fmt;
52403 dtp->data1 = data1;
52404 dtp->data2 = data2;
52405 dtp->data3 = data3;
52406- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52407+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52408 dtp->jif = jiffies;
52409 #endif
52410 return;
52411@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52412 !phba || !phba->slow_ring_trc)
52413 return;
52414
52415- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52416+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52417 (lpfc_debugfs_max_slow_ring_trc - 1);
52418 dtp = phba->slow_ring_trc + index;
52419 dtp->fmt = fmt;
52420 dtp->data1 = data1;
52421 dtp->data2 = data2;
52422 dtp->data3 = data3;
52423- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52424+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52425 dtp->jif = jiffies;
52426 #endif
52427 return;
52428@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52429 "slow_ring buffer\n");
52430 goto debug_failed;
52431 }
52432- atomic_set(&phba->slow_ring_trc_cnt, 0);
52433+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52434 memset(phba->slow_ring_trc, 0,
52435 (sizeof(struct lpfc_debugfs_trc) *
52436 lpfc_debugfs_max_slow_ring_trc));
52437@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52438 "buffer\n");
52439 goto debug_failed;
52440 }
52441- atomic_set(&vport->disc_trc_cnt, 0);
52442+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52443
52444 snprintf(name, sizeof(name), "discovery_trace");
52445 vport->debug_disc_trc =
52446diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52447index 0b2c53a..aec2b45 100644
52448--- a/drivers/scsi/lpfc/lpfc_init.c
52449+++ b/drivers/scsi/lpfc/lpfc_init.c
52450@@ -11290,8 +11290,10 @@ lpfc_init(void)
52451 "misc_register returned with status %d", error);
52452
52453 if (lpfc_enable_npiv) {
52454- lpfc_transport_functions.vport_create = lpfc_vport_create;
52455- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52456+ pax_open_kernel();
52457+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52458+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52459+ pax_close_kernel();
52460 }
52461 lpfc_transport_template =
52462 fc_attach_transport(&lpfc_transport_functions);
52463diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52464index 4f9222e..f1850e3 100644
52465--- a/drivers/scsi/lpfc/lpfc_scsi.c
52466+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52467@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52468 unsigned long expires;
52469
52470 spin_lock_irqsave(&phba->hbalock, flags);
52471- atomic_inc(&phba->num_rsrc_err);
52472+ atomic_inc_unchecked(&phba->num_rsrc_err);
52473 phba->last_rsrc_error_time = jiffies;
52474
52475 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
52476@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52477 unsigned long num_rsrc_err, num_cmd_success;
52478 int i;
52479
52480- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52481- num_cmd_success = atomic_read(&phba->num_cmd_success);
52482+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52483+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52484
52485 /*
52486 * The error and success command counters are global per
52487@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52488 }
52489 }
52490 lpfc_destroy_vport_work_array(phba, vports);
52491- atomic_set(&phba->num_rsrc_err, 0);
52492- atomic_set(&phba->num_cmd_success, 0);
52493+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52494+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52495 }
52496
52497 /**
52498diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52499index 3f26147..ee8efd1 100644
52500--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52501+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52502@@ -1509,7 +1509,7 @@ _scsih_get_resync(struct device *dev)
52503 {
52504 struct scsi_device *sdev = to_scsi_device(dev);
52505 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52506- static struct _raid_device *raid_device;
52507+ struct _raid_device *raid_device;
52508 unsigned long flags;
52509 Mpi2RaidVolPage0_t vol_pg0;
52510 Mpi2ConfigReply_t mpi_reply;
52511@@ -1561,7 +1561,7 @@ _scsih_get_state(struct device *dev)
52512 {
52513 struct scsi_device *sdev = to_scsi_device(dev);
52514 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52515- static struct _raid_device *raid_device;
52516+ struct _raid_device *raid_device;
52517 unsigned long flags;
52518 Mpi2RaidVolPage0_t vol_pg0;
52519 Mpi2ConfigReply_t mpi_reply;
52520@@ -6641,7 +6641,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52521 Mpi2EventDataIrOperationStatus_t *event_data =
52522 (Mpi2EventDataIrOperationStatus_t *)
52523 fw_event->event_data;
52524- static struct _raid_device *raid_device;
52525+ struct _raid_device *raid_device;
52526 unsigned long flags;
52527 u16 handle;
52528
52529@@ -7112,7 +7112,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52530 u64 sas_address;
52531 struct _sas_device *sas_device;
52532 struct _sas_node *expander_device;
52533- static struct _raid_device *raid_device;
52534+ struct _raid_device *raid_device;
52535 u8 retry_count;
52536 unsigned long flags;
52537
52538diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52539index ed31d8c..ab856b3 100644
52540--- a/drivers/scsi/pmcraid.c
52541+++ b/drivers/scsi/pmcraid.c
52542@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52543 res->scsi_dev = scsi_dev;
52544 scsi_dev->hostdata = res;
52545 res->change_detected = 0;
52546- atomic_set(&res->read_failures, 0);
52547- atomic_set(&res->write_failures, 0);
52548+ atomic_set_unchecked(&res->read_failures, 0);
52549+ atomic_set_unchecked(&res->write_failures, 0);
52550 rc = 0;
52551 }
52552 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52553@@ -2640,9 +2640,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52554
52555 /* If this was a SCSI read/write command keep count of errors */
52556 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52557- atomic_inc(&res->read_failures);
52558+ atomic_inc_unchecked(&res->read_failures);
52559 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52560- atomic_inc(&res->write_failures);
52561+ atomic_inc_unchecked(&res->write_failures);
52562
52563 if (!RES_IS_GSCSI(res->cfg_entry) &&
52564 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52565@@ -3468,7 +3468,7 @@ static int pmcraid_queuecommand_lck(
52566 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52567 * hrrq_id assigned here in queuecommand
52568 */
52569- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52570+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52571 pinstance->num_hrrq;
52572 cmd->cmd_done = pmcraid_io_done;
52573
52574@@ -3782,7 +3782,7 @@ static long pmcraid_ioctl_passthrough(
52575 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52576 * hrrq_id assigned here in queuecommand
52577 */
52578- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52579+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52580 pinstance->num_hrrq;
52581
52582 if (request_size) {
52583@@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52584
52585 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52586 /* add resources only after host is added into system */
52587- if (!atomic_read(&pinstance->expose_resources))
52588+ if (!atomic_read_unchecked(&pinstance->expose_resources))
52589 return;
52590
52591 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52592@@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52593 init_waitqueue_head(&pinstance->reset_wait_q);
52594
52595 atomic_set(&pinstance->outstanding_cmds, 0);
52596- atomic_set(&pinstance->last_message_id, 0);
52597- atomic_set(&pinstance->expose_resources, 0);
52598+ atomic_set_unchecked(&pinstance->last_message_id, 0);
52599+ atomic_set_unchecked(&pinstance->expose_resources, 0);
52600
52601 INIT_LIST_HEAD(&pinstance->free_res_q);
52602 INIT_LIST_HEAD(&pinstance->used_res_q);
52603@@ -5951,7 +5951,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52604 /* Schedule worker thread to handle CCN and take care of adding and
52605 * removing devices to OS
52606 */
52607- atomic_set(&pinstance->expose_resources, 1);
52608+ atomic_set_unchecked(&pinstance->expose_resources, 1);
52609 schedule_work(&pinstance->worker_q);
52610 return rc;
52611
52612diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52613index e1d150f..6c6df44 100644
52614--- a/drivers/scsi/pmcraid.h
52615+++ b/drivers/scsi/pmcraid.h
52616@@ -748,7 +748,7 @@ struct pmcraid_instance {
52617 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52618
52619 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52620- atomic_t last_message_id;
52621+ atomic_unchecked_t last_message_id;
52622
52623 /* configuration table */
52624 struct pmcraid_config_table *cfg_table;
52625@@ -777,7 +777,7 @@ struct pmcraid_instance {
52626 atomic_t outstanding_cmds;
52627
52628 /* should add/delete resources to mid-layer now ?*/
52629- atomic_t expose_resources;
52630+ atomic_unchecked_t expose_resources;
52631
52632
52633
52634@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52635 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52636 };
52637 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52638- atomic_t read_failures; /* count of failed READ commands */
52639- atomic_t write_failures; /* count of failed WRITE commands */
52640+ atomic_unchecked_t read_failures; /* count of failed READ commands */
52641+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52642
52643 /* To indicate add/delete/modify during CCN */
52644 u8 change_detected;
52645diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52646index 82b92c4..3178171 100644
52647--- a/drivers/scsi/qla2xxx/qla_attr.c
52648+++ b/drivers/scsi/qla2xxx/qla_attr.c
52649@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52650 return 0;
52651 }
52652
52653-struct fc_function_template qla2xxx_transport_functions = {
52654+fc_function_template_no_const qla2xxx_transport_functions = {
52655
52656 .show_host_node_name = 1,
52657 .show_host_port_name = 1,
52658@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52659 .bsg_timeout = qla24xx_bsg_timeout,
52660 };
52661
52662-struct fc_function_template qla2xxx_transport_vport_functions = {
52663+fc_function_template_no_const qla2xxx_transport_vport_functions = {
52664
52665 .show_host_node_name = 1,
52666 .show_host_port_name = 1,
52667diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52668index 7686bfe..4710893 100644
52669--- a/drivers/scsi/qla2xxx/qla_gbl.h
52670+++ b/drivers/scsi/qla2xxx/qla_gbl.h
52671@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
52672 struct device_attribute;
52673 extern struct device_attribute *qla2x00_host_attrs[];
52674 struct fc_function_template;
52675-extern struct fc_function_template qla2xxx_transport_functions;
52676-extern struct fc_function_template qla2xxx_transport_vport_functions;
52677+extern fc_function_template_no_const qla2xxx_transport_functions;
52678+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52679 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52680 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52681 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52682diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52683index cce1cbc..5b9f0fe 100644
52684--- a/drivers/scsi/qla2xxx/qla_os.c
52685+++ b/drivers/scsi/qla2xxx/qla_os.c
52686@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52687 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52688 /* Ok, a 64bit DMA mask is applicable. */
52689 ha->flags.enable_64bit_addressing = 1;
52690- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52691- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52692+ pax_open_kernel();
52693+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52694+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52695+ pax_close_kernel();
52696 return;
52697 }
52698 }
52699diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52700index 8f6d0fb..1b21097 100644
52701--- a/drivers/scsi/qla4xxx/ql4_def.h
52702+++ b/drivers/scsi/qla4xxx/ql4_def.h
52703@@ -305,7 +305,7 @@ struct ddb_entry {
52704 * (4000 only) */
52705 atomic_t relogin_timer; /* Max Time to wait for
52706 * relogin to complete */
52707- atomic_t relogin_retry_count; /* Num of times relogin has been
52708+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52709 * retried */
52710 uint32_t default_time2wait; /* Default Min time between
52711 * relogins (+aens) */
52712diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52713index 6d25879..3031a9f 100644
52714--- a/drivers/scsi/qla4xxx/ql4_os.c
52715+++ b/drivers/scsi/qla4xxx/ql4_os.c
52716@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52717 */
52718 if (!iscsi_is_session_online(cls_sess)) {
52719 /* Reset retry relogin timer */
52720- atomic_inc(&ddb_entry->relogin_retry_count);
52721+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52722 DEBUG2(ql4_printk(KERN_INFO, ha,
52723 "%s: index[%d] relogin timed out-retrying"
52724 " relogin (%d), retry (%d)\n", __func__,
52725 ddb_entry->fw_ddb_index,
52726- atomic_read(&ddb_entry->relogin_retry_count),
52727+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52728 ddb_entry->default_time2wait + 4));
52729 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52730 atomic_set(&ddb_entry->retry_relogin_timer,
52731@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52732
52733 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52734 atomic_set(&ddb_entry->relogin_timer, 0);
52735- atomic_set(&ddb_entry->relogin_retry_count, 0);
52736+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52737 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52738 ddb_entry->default_relogin_timeout =
52739 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52740diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52741index b1a2631..5bcd9c8 100644
52742--- a/drivers/scsi/scsi_lib.c
52743+++ b/drivers/scsi/scsi_lib.c
52744@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52745 shost = sdev->host;
52746 scsi_init_cmd_errh(cmd);
52747 cmd->result = DID_NO_CONNECT << 16;
52748- atomic_inc(&cmd->device->iorequest_cnt);
52749+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52750
52751 /*
52752 * SCSI request completion path will do scsi_device_unbusy(),
52753@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
52754
52755 INIT_LIST_HEAD(&cmd->eh_entry);
52756
52757- atomic_inc(&cmd->device->iodone_cnt);
52758+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
52759 if (cmd->result)
52760- atomic_inc(&cmd->device->ioerr_cnt);
52761+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
52762
52763 disposition = scsi_decide_disposition(cmd);
52764 if (disposition != SUCCESS &&
52765@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
52766 struct Scsi_Host *host = cmd->device->host;
52767 int rtn = 0;
52768
52769- atomic_inc(&cmd->device->iorequest_cnt);
52770+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52771
52772 /* check if the device is still usable */
52773 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
52774diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
52775index 1ac38e7..6acc656 100644
52776--- a/drivers/scsi/scsi_sysfs.c
52777+++ b/drivers/scsi/scsi_sysfs.c
52778@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
52779 char *buf) \
52780 { \
52781 struct scsi_device *sdev = to_scsi_device(dev); \
52782- unsigned long long count = atomic_read(&sdev->field); \
52783+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
52784 return snprintf(buf, 20, "0x%llx\n", count); \
52785 } \
52786 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
52787diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
52788index 5d6f348..18778a6b 100644
52789--- a/drivers/scsi/scsi_transport_fc.c
52790+++ b/drivers/scsi/scsi_transport_fc.c
52791@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
52792 * Netlink Infrastructure
52793 */
52794
52795-static atomic_t fc_event_seq;
52796+static atomic_unchecked_t fc_event_seq;
52797
52798 /**
52799 * fc_get_event_number - Obtain the next sequential FC event number
52800@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
52801 u32
52802 fc_get_event_number(void)
52803 {
52804- return atomic_add_return(1, &fc_event_seq);
52805+ return atomic_add_return_unchecked(1, &fc_event_seq);
52806 }
52807 EXPORT_SYMBOL(fc_get_event_number);
52808
52809@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
52810 {
52811 int error;
52812
52813- atomic_set(&fc_event_seq, 0);
52814+ atomic_set_unchecked(&fc_event_seq, 0);
52815
52816 error = transport_class_register(&fc_host_class);
52817 if (error)
52818@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
52819 char *cp;
52820
52821 *val = simple_strtoul(buf, &cp, 0);
52822- if ((*cp && (*cp != '\n')) || (*val < 0))
52823+ if (*cp && (*cp != '\n'))
52824 return -EINVAL;
52825 /*
52826 * Check for overflow; dev_loss_tmo is u32
52827diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
52828index 67d43e3..8cee73c 100644
52829--- a/drivers/scsi/scsi_transport_iscsi.c
52830+++ b/drivers/scsi/scsi_transport_iscsi.c
52831@@ -79,7 +79,7 @@ struct iscsi_internal {
52832 struct transport_container session_cont;
52833 };
52834
52835-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
52836+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
52837 static struct workqueue_struct *iscsi_eh_timer_workq;
52838
52839 static DEFINE_IDA(iscsi_sess_ida);
52840@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
52841 int err;
52842
52843 ihost = shost->shost_data;
52844- session->sid = atomic_add_return(1, &iscsi_session_nr);
52845+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
52846
52847 if (target_id == ISCSI_MAX_TARGET) {
52848 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
52849@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
52850 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
52851 ISCSI_TRANSPORT_VERSION);
52852
52853- atomic_set(&iscsi_session_nr, 0);
52854+ atomic_set_unchecked(&iscsi_session_nr, 0);
52855
52856 err = class_register(&iscsi_transport_class);
52857 if (err)
52858diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
52859index ae45bd9..c32a586 100644
52860--- a/drivers/scsi/scsi_transport_srp.c
52861+++ b/drivers/scsi/scsi_transport_srp.c
52862@@ -35,7 +35,7 @@
52863 #include "scsi_priv.h"
52864
52865 struct srp_host_attrs {
52866- atomic_t next_port_id;
52867+ atomic_unchecked_t next_port_id;
52868 };
52869 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
52870
52871@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
52872 struct Scsi_Host *shost = dev_to_shost(dev);
52873 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
52874
52875- atomic_set(&srp_host->next_port_id, 0);
52876+ atomic_set_unchecked(&srp_host->next_port_id, 0);
52877 return 0;
52878 }
52879
52880@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
52881 rport_fast_io_fail_timedout);
52882 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
52883
52884- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
52885+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
52886 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
52887
52888 transport_setup_device(&rport->dev);
52889diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
52890index 3290a3e..d65ac1c 100644
52891--- a/drivers/scsi/sd.c
52892+++ b/drivers/scsi/sd.c
52893@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
52894 sdkp->disk = gd;
52895 sdkp->index = index;
52896 atomic_set(&sdkp->openers, 0);
52897- atomic_set(&sdkp->device->ioerr_cnt, 0);
52898+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52899
52900 if (!sdp->request_queue->rq_timeout) {
52901 if (sdp->type != TYPE_MOD)
52902diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52903index 2270bd5..98408a5 100644
52904--- a/drivers/scsi/sg.c
52905+++ b/drivers/scsi/sg.c
52906@@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
52907 sdp->disk->disk_name,
52908 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
52909 NULL,
52910- (char *)arg);
52911+ (char __user *)arg);
52912 case BLKTRACESTART:
52913 return blk_trace_startstop(sdp->device->request_queue, 1);
52914 case BLKTRACESTOP:
52915diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
52916index c0d660f..24a5854 100644
52917--- a/drivers/soc/tegra/fuse/fuse-tegra.c
52918+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
52919@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
52920 return i;
52921 }
52922
52923-static struct bin_attribute fuse_bin_attr = {
52924+static bin_attribute_no_const fuse_bin_attr = {
52925 .attr = { .name = "fuse", .mode = S_IRUGO, },
52926 .read = fuse_read,
52927 };
52928diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
52929index 57a1950..ae54e21 100644
52930--- a/drivers/spi/spi.c
52931+++ b/drivers/spi/spi.c
52932@@ -2307,7 +2307,7 @@ int spi_bus_unlock(struct spi_master *master)
52933 EXPORT_SYMBOL_GPL(spi_bus_unlock);
52934
52935 /* portable code must never pass more than 32 bytes */
52936-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
52937+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
52938
52939 static u8 *buf;
52940
52941diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
52942index b41429f..2de5373 100644
52943--- a/drivers/staging/android/timed_output.c
52944+++ b/drivers/staging/android/timed_output.c
52945@@ -25,7 +25,7 @@
52946 #include "timed_output.h"
52947
52948 static struct class *timed_output_class;
52949-static atomic_t device_count;
52950+static atomic_unchecked_t device_count;
52951
52952 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
52953 char *buf)
52954@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
52955 timed_output_class = class_create(THIS_MODULE, "timed_output");
52956 if (IS_ERR(timed_output_class))
52957 return PTR_ERR(timed_output_class);
52958- atomic_set(&device_count, 0);
52959+ atomic_set_unchecked(&device_count, 0);
52960 timed_output_class->dev_groups = timed_output_groups;
52961 }
52962
52963@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
52964 if (ret < 0)
52965 return ret;
52966
52967- tdev->index = atomic_inc_return(&device_count);
52968+ tdev->index = atomic_inc_return_unchecked(&device_count);
52969 tdev->dev = device_create(timed_output_class, NULL,
52970 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
52971 if (IS_ERR(tdev->dev))
52972diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
52973index 727640e..55bf61c 100644
52974--- a/drivers/staging/comedi/comedi_fops.c
52975+++ b/drivers/staging/comedi/comedi_fops.c
52976@@ -297,8 +297,8 @@ static void comedi_file_reset(struct file *file)
52977 }
52978 cfp->last_attached = dev->attached;
52979 cfp->last_detach_count = dev->detach_count;
52980- ACCESS_ONCE(cfp->read_subdev) = read_s;
52981- ACCESS_ONCE(cfp->write_subdev) = write_s;
52982+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
52983+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
52984 }
52985
52986 static void comedi_file_check(struct file *file)
52987@@ -1924,7 +1924,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
52988 !(s_old->async->cmd.flags & CMDF_WRITE))
52989 return -EBUSY;
52990
52991- ACCESS_ONCE(cfp->read_subdev) = s_new;
52992+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
52993 return 0;
52994 }
52995
52996@@ -1966,7 +1966,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
52997 (s_old->async->cmd.flags & CMDF_WRITE))
52998 return -EBUSY;
52999
53000- ACCESS_ONCE(cfp->write_subdev) = s_new;
53001+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
53002 return 0;
53003 }
53004
53005diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
53006index 37dcf7e..f3c2016 100644
53007--- a/drivers/staging/fbtft/fbtft-core.c
53008+++ b/drivers/staging/fbtft/fbtft-core.c
53009@@ -689,7 +689,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
53010 {
53011 struct fb_info *info;
53012 struct fbtft_par *par;
53013- struct fb_ops *fbops = NULL;
53014+ fb_ops_no_const *fbops = NULL;
53015 struct fb_deferred_io *fbdefio = NULL;
53016 struct fbtft_platform_data *pdata = dev->platform_data;
53017 u8 *vmem = NULL;
53018diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
53019index 0dbf3f9..fed0063 100644
53020--- a/drivers/staging/fbtft/fbtft.h
53021+++ b/drivers/staging/fbtft/fbtft.h
53022@@ -106,7 +106,7 @@ struct fbtft_ops {
53023
53024 int (*set_var)(struct fbtft_par *par);
53025 int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
53026-};
53027+} __no_const;
53028
53029 /**
53030 * struct fbtft_display - Describes the display properties
53031diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53032index 001348c..cfaac8a 100644
53033--- a/drivers/staging/gdm724x/gdm_tty.c
53034+++ b/drivers/staging/gdm724x/gdm_tty.c
53035@@ -44,7 +44,7 @@
53036 #define gdm_tty_send_control(n, r, v, d, l) (\
53037 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53038
53039-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53040+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53041
53042 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53043 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53044diff --git a/drivers/staging/i2o/i2o.h b/drivers/staging/i2o/i2o.h
53045index d23c3c2..eb63c81 100644
53046--- a/drivers/staging/i2o/i2o.h
53047+++ b/drivers/staging/i2o/i2o.h
53048@@ -565,7 +565,7 @@ struct i2o_controller {
53049 struct i2o_device *exec; /* Executive */
53050 #if BITS_PER_LONG == 64
53051 spinlock_t context_list_lock; /* lock for context_list */
53052- atomic_t context_list_counter; /* needed for unique contexts */
53053+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53054 struct list_head context_list; /* list of context id's
53055 and pointers */
53056 #endif
53057diff --git a/drivers/staging/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
53058index ad84f33..c5bdf65 100644
53059--- a/drivers/staging/i2o/i2o_proc.c
53060+++ b/drivers/staging/i2o/i2o_proc.c
53061@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
53062 "Array Controller Device"
53063 };
53064
53065-static char *chtostr(char *tmp, u8 *chars, int n)
53066-{
53067- tmp[0] = 0;
53068- return strncat(tmp, (char *)chars, n);
53069-}
53070-
53071 static int i2o_report_query_status(struct seq_file *seq, int block_status,
53072 char *group)
53073 {
53074@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
53075 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
53076 {
53077 struct i2o_controller *c = (struct i2o_controller *)seq->private;
53078- static u32 work32[5];
53079- static u8 *work8 = (u8 *) work32;
53080- static u16 *work16 = (u16 *) work32;
53081+ u32 work32[5];
53082+ u8 *work8 = (u8 *) work32;
53083+ u16 *work16 = (u16 *) work32;
53084 int token;
53085 u32 hwcap;
53086
53087@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53088 } *result;
53089
53090 i2o_exec_execute_ddm_table ddm_table;
53091- char tmp[28 + 1];
53092
53093 result = kmalloc(sizeof(*result), GFP_KERNEL);
53094 if (!result)
53095@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53096
53097 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
53098 seq_printf(seq, "%-#8x", ddm_table.module_id);
53099- seq_printf(seq, "%-29s",
53100- chtostr(tmp, ddm_table.module_name_version, 28));
53101+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
53102 seq_printf(seq, "%9d ", ddm_table.data_size);
53103 seq_printf(seq, "%8d", ddm_table.code_size);
53104
53105@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53106
53107 i2o_driver_result_table *result;
53108 i2o_driver_store_table *dst;
53109- char tmp[28 + 1];
53110
53111 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
53112 if (result == NULL)
53113@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53114
53115 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
53116 seq_printf(seq, "%-#8x", dst->module_id);
53117- seq_printf(seq, "%-29s",
53118- chtostr(tmp, dst->module_name_version, 28));
53119- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
53120+ seq_printf(seq, "%-.28s", dst->module_name_version);
53121+ seq_printf(seq, "%-.8s", dst->date);
53122 seq_printf(seq, "%8d ", dst->module_size);
53123 seq_printf(seq, "%8d ", dst->mpb_size);
53124 seq_printf(seq, "0x%04x", dst->module_flags);
53125@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
53126 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53127 {
53128 struct i2o_device *d = (struct i2o_device *)seq->private;
53129- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53130+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53131 // == (allow) 512d bytes (max)
53132- static u16 *work16 = (u16 *) work32;
53133+ u16 *work16 = (u16 *) work32;
53134 int token;
53135- char tmp[16 + 1];
53136
53137 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
53138
53139@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53140 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
53141 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
53142 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
53143- seq_printf(seq, "Vendor info : %s\n",
53144- chtostr(tmp, (u8 *) (work32 + 2), 16));
53145- seq_printf(seq, "Product info : %s\n",
53146- chtostr(tmp, (u8 *) (work32 + 6), 16));
53147- seq_printf(seq, "Description : %s\n",
53148- chtostr(tmp, (u8 *) (work32 + 10), 16));
53149- seq_printf(seq, "Product rev. : %s\n",
53150- chtostr(tmp, (u8 *) (work32 + 14), 8));
53151+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
53152+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
53153+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
53154+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
53155
53156 seq_printf(seq, "Serial number : ");
53157 print_serial_number(seq, (u8 *) (work32 + 16),
53158@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53159 u8 pad[256]; // allow up to 256 byte (max) serial number
53160 } result;
53161
53162- char tmp[24 + 1];
53163-
53164 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
53165
53166 if (token < 0) {
53167@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53168 }
53169
53170 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
53171- seq_printf(seq, "Module name : %s\n",
53172- chtostr(tmp, result.module_name, 24));
53173- seq_printf(seq, "Module revision : %s\n",
53174- chtostr(tmp, result.module_rev, 8));
53175+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
53176+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
53177
53178 seq_printf(seq, "Serial number : ");
53179 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
53180@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53181 u8 instance_number[4];
53182 } result;
53183
53184- char tmp[64 + 1];
53185-
53186 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
53187
53188 if (token < 0) {
53189@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53190 return 0;
53191 }
53192
53193- seq_printf(seq, "Device name : %s\n",
53194- chtostr(tmp, result.device_name, 64));
53195- seq_printf(seq, "Service name : %s\n",
53196- chtostr(tmp, result.service_name, 64));
53197- seq_printf(seq, "Physical name : %s\n",
53198- chtostr(tmp, result.physical_location, 64));
53199- seq_printf(seq, "Instance number : %s\n",
53200- chtostr(tmp, result.instance_number, 4));
53201+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
53202+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
53203+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
53204+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
53205
53206 return 0;
53207 }
53208@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53209 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
53210 {
53211 struct i2o_device *d = (struct i2o_device *)seq->private;
53212- static u32 work32[12];
53213- static u16 *work16 = (u16 *) work32;
53214- static u8 *work8 = (u8 *) work32;
53215+ u32 work32[12];
53216+ u16 *work16 = (u16 *) work32;
53217+ u8 *work8 = (u8 *) work32;
53218 int token;
53219
53220 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
53221diff --git a/drivers/staging/i2o/iop.c b/drivers/staging/i2o/iop.c
53222index 52334fc..d7f40b3 100644
53223--- a/drivers/staging/i2o/iop.c
53224+++ b/drivers/staging/i2o/iop.c
53225@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
53226
53227 spin_lock_irqsave(&c->context_list_lock, flags);
53228
53229- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
53230- atomic_inc(&c->context_list_counter);
53231+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
53232+ atomic_inc_unchecked(&c->context_list_counter);
53233
53234- entry->context = atomic_read(&c->context_list_counter);
53235+ entry->context = atomic_read_unchecked(&c->context_list_counter);
53236
53237 list_add(&entry->list, &c->context_list);
53238
53239@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
53240
53241 #if BITS_PER_LONG == 64
53242 spin_lock_init(&c->context_list_lock);
53243- atomic_set(&c->context_list_counter, 0);
53244+ atomic_set_unchecked(&c->context_list_counter, 0);
53245 INIT_LIST_HEAD(&c->context_list);
53246 #endif
53247
53248diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53249index 463da07..e791ce9 100644
53250--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53251+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53252@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53253 return 0;
53254 }
53255
53256-sfw_test_client_ops_t brw_test_client;
53257-void brw_init_test_client(void)
53258-{
53259- brw_test_client.tso_init = brw_client_init;
53260- brw_test_client.tso_fini = brw_client_fini;
53261- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53262- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53263+sfw_test_client_ops_t brw_test_client = {
53264+ .tso_init = brw_client_init,
53265+ .tso_fini = brw_client_fini,
53266+ .tso_prep_rpc = brw_client_prep_rpc,
53267+ .tso_done_rpc = brw_client_done_rpc,
53268 };
53269
53270 srpc_service_t brw_test_service;
53271diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53272index 5709148..ccd9e0d 100644
53273--- a/drivers/staging/lustre/lnet/selftest/framework.c
53274+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53275@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
53276
53277 extern sfw_test_client_ops_t ping_test_client;
53278 extern srpc_service_t ping_test_service;
53279-extern void ping_init_test_client(void);
53280 extern void ping_init_test_service(void);
53281
53282 extern sfw_test_client_ops_t brw_test_client;
53283 extern srpc_service_t brw_test_service;
53284-extern void brw_init_test_client(void);
53285 extern void brw_init_test_service(void);
53286
53287
53288@@ -1675,12 +1673,10 @@ sfw_startup (void)
53289 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53290 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53291
53292- brw_init_test_client();
53293 brw_init_test_service();
53294 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53295 LASSERT (rc == 0);
53296
53297- ping_init_test_client();
53298 ping_init_test_service();
53299 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53300 LASSERT (rc == 0);
53301diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53302index d8c0df6..5041cbb 100644
53303--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53304+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53305@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53306 return 0;
53307 }
53308
53309-sfw_test_client_ops_t ping_test_client;
53310-void ping_init_test_client(void)
53311-{
53312- ping_test_client.tso_init = ping_client_init;
53313- ping_test_client.tso_fini = ping_client_fini;
53314- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53315- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53316-}
53317+sfw_test_client_ops_t ping_test_client = {
53318+ .tso_init = ping_client_init,
53319+ .tso_fini = ping_client_fini,
53320+ .tso_prep_rpc = ping_client_prep_rpc,
53321+ .tso_done_rpc = ping_client_done_rpc,
53322+};
53323
53324 srpc_service_t ping_test_service;
53325 void ping_init_test_service(void)
53326diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53327index 83bc0a9..12ba00a 100644
53328--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53329+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53330@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
53331 ldlm_completion_callback lcs_completion;
53332 ldlm_blocking_callback lcs_blocking;
53333 ldlm_glimpse_callback lcs_glimpse;
53334-};
53335+} __no_const;
53336
53337 /* ldlm_lockd.c */
53338 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53339diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53340index 2a88b80..62e7e5f 100644
53341--- a/drivers/staging/lustre/lustre/include/obd.h
53342+++ b/drivers/staging/lustre/lustre/include/obd.h
53343@@ -1362,7 +1362,7 @@ struct md_ops {
53344 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53345 * wrapper function in include/linux/obd_class.h.
53346 */
53347-};
53348+} __no_const;
53349
53350 struct lsm_operations {
53351 void (*lsm_free)(struct lov_stripe_md *);
53352diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53353index a4c252f..b21acac 100644
53354--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53355+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53356@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53357 int added = (mode == LCK_NL);
53358 int overlaps = 0;
53359 int splitted = 0;
53360- const struct ldlm_callback_suite null_cbs = { NULL };
53361+ const struct ldlm_callback_suite null_cbs = { };
53362
53363 CDEBUG(D_DLMTRACE,
53364 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
53365diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53366index c539e37..743b213 100644
53367--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53368+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53369@@ -237,7 +237,7 @@ static int proc_console_max_delay_cs(struct ctl_table *table, int write,
53370 loff_t *ppos)
53371 {
53372 int rc, max_delay_cs;
53373- struct ctl_table dummy = *table;
53374+ ctl_table_no_const dummy = *table;
53375 long d;
53376
53377 dummy.data = &max_delay_cs;
53378@@ -270,7 +270,7 @@ static int proc_console_min_delay_cs(struct ctl_table *table, int write,
53379 loff_t *ppos)
53380 {
53381 int rc, min_delay_cs;
53382- struct ctl_table dummy = *table;
53383+ ctl_table_no_const dummy = *table;
53384 long d;
53385
53386 dummy.data = &min_delay_cs;
53387@@ -302,7 +302,7 @@ static int proc_console_backoff(struct ctl_table *table, int write,
53388 void __user *buffer, size_t *lenp, loff_t *ppos)
53389 {
53390 int rc, backoff;
53391- struct ctl_table dummy = *table;
53392+ ctl_table_no_const dummy = *table;
53393
53394 dummy.data = &backoff;
53395 dummy.proc_handler = &proc_dointvec;
53396diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53397index 7dc77dd..289d03e 100644
53398--- a/drivers/staging/lustre/lustre/libcfs/module.c
53399+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53400@@ -313,11 +313,11 @@ out:
53401
53402
53403 struct cfs_psdev_ops libcfs_psdev_ops = {
53404- libcfs_psdev_open,
53405- libcfs_psdev_release,
53406- NULL,
53407- NULL,
53408- libcfs_ioctl
53409+ .p_open = libcfs_psdev_open,
53410+ .p_close = libcfs_psdev_release,
53411+ .p_read = NULL,
53412+ .p_write = NULL,
53413+ .p_ioctl = libcfs_ioctl
53414 };
53415
53416 extern int insert_proc(void);
53417diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53418index 22667db..8b703b6 100644
53419--- a/drivers/staging/octeon/ethernet-rx.c
53420+++ b/drivers/staging/octeon/ethernet-rx.c
53421@@ -354,14 +354,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53422 /* Increment RX stats for virtual ports */
53423 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53424 #ifdef CONFIG_64BIT
53425- atomic64_add(1,
53426+ atomic64_add_unchecked(1,
53427 (atomic64_t *)&priv->stats.rx_packets);
53428- atomic64_add(skb->len,
53429+ atomic64_add_unchecked(skb->len,
53430 (atomic64_t *)&priv->stats.rx_bytes);
53431 #else
53432- atomic_add(1,
53433+ atomic_add_unchecked(1,
53434 (atomic_t *)&priv->stats.rx_packets);
53435- atomic_add(skb->len,
53436+ atomic_add_unchecked(skb->len,
53437 (atomic_t *)&priv->stats.rx_bytes);
53438 #endif
53439 }
53440@@ -373,10 +373,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53441 dev->name);
53442 */
53443 #ifdef CONFIG_64BIT
53444- atomic64_add(1,
53445+ atomic64_add_unchecked(1,
53446 (atomic64_t *)&priv->stats.rx_dropped);
53447 #else
53448- atomic_add(1,
53449+ atomic_add_unchecked(1,
53450 (atomic_t *)&priv->stats.rx_dropped);
53451 #endif
53452 dev_kfree_skb_irq(skb);
53453diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53454index 460e854..f926452 100644
53455--- a/drivers/staging/octeon/ethernet.c
53456+++ b/drivers/staging/octeon/ethernet.c
53457@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53458 * since the RX tasklet also increments it.
53459 */
53460 #ifdef CONFIG_64BIT
53461- atomic64_add(rx_status.dropped_packets,
53462- (atomic64_t *)&priv->stats.rx_dropped);
53463+ atomic64_add_unchecked(rx_status.dropped_packets,
53464+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53465 #else
53466- atomic_add(rx_status.dropped_packets,
53467- (atomic_t *)&priv->stats.rx_dropped);
53468+ atomic_add_unchecked(rx_status.dropped_packets,
53469+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53470 #endif
53471 }
53472
53473diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53474index 3b476d8..f522d68 100644
53475--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53476+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53477@@ -225,7 +225,7 @@ struct hal_ops {
53478
53479 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
53480 void (*hal_reset_security_engine)(struct adapter *adapter);
53481-};
53482+} __no_const;
53483
53484 enum rt_eeprom_type {
53485 EEPROM_93C46,
53486diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53487index 070cc03..6806e37 100644
53488--- a/drivers/staging/rtl8712/rtl871x_io.h
53489+++ b/drivers/staging/rtl8712/rtl871x_io.h
53490@@ -108,7 +108,7 @@ struct _io_ops {
53491 u8 *pmem);
53492 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53493 u8 *pmem);
53494-};
53495+} __no_const;
53496
53497 struct io_req {
53498 struct list_head list;
53499diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
53500index 98f3ba4..c6a7fce 100644
53501--- a/drivers/staging/unisys/visorchipset/visorchipset.h
53502+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
53503@@ -171,7 +171,7 @@ struct visorchipset_busdev_notifiers {
53504 void (*device_resume)(ulong bus_no, ulong dev_no);
53505 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
53506 ulong *max_size);
53507-};
53508+} __no_const;
53509
53510 /* These functions live inside visorchipset, and will be called to indicate
53511 * responses to specific events (by code outside of visorchipset).
53512@@ -186,7 +186,7 @@ struct visorchipset_busdev_responders {
53513 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
53514 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
53515 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
53516-};
53517+} __no_const;
53518
53519 /** Register functions (in the bus driver) to get called by visorchipset
53520 * whenever a bus or device appears for which this service partition is
53521diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53522index 9512af6..045bf5a 100644
53523--- a/drivers/target/sbp/sbp_target.c
53524+++ b/drivers/target/sbp/sbp_target.c
53525@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53526
53527 #define SESSION_MAINTENANCE_INTERVAL HZ
53528
53529-static atomic_t login_id = ATOMIC_INIT(0);
53530+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53531
53532 static void session_maintenance_work(struct work_struct *);
53533 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53534@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53535 login->lun = se_lun;
53536 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53537 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53538- login->login_id = atomic_inc_return(&login_id);
53539+ login->login_id = atomic_inc_return_unchecked(&login_id);
53540
53541 login->tgt_agt = sbp_target_agent_register(login);
53542 if (IS_ERR(login->tgt_agt)) {
53543diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53544index 7faa6ae..ae6c410 100644
53545--- a/drivers/target/target_core_device.c
53546+++ b/drivers/target/target_core_device.c
53547@@ -1495,7 +1495,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53548 spin_lock_init(&dev->se_tmr_lock);
53549 spin_lock_init(&dev->qf_cmd_lock);
53550 sema_init(&dev->caw_sem, 1);
53551- atomic_set(&dev->dev_ordered_id, 0);
53552+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53553 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53554 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53555 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53556diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53557index f786de0..04b643e 100644
53558--- a/drivers/target/target_core_transport.c
53559+++ b/drivers/target/target_core_transport.c
53560@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53561 * Used to determine when ORDERED commands should go from
53562 * Dormant to Active status.
53563 */
53564- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53565+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53566 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53567 cmd->se_ordered_id, cmd->sam_task_attr,
53568 dev->transport->name);
53569diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
53570index 031018e..90981a1 100644
53571--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
53572+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
53573@@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
53574 platform_set_drvdata(pdev, priv);
53575
53576 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
53577- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53578- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53579+ pax_open_kernel();
53580+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53581+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53582+ pax_close_kernel();
53583 }
53584 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
53585 priv, &int3400_thermal_ops,
53586diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53587index 668fb1b..2737bbe 100644
53588--- a/drivers/thermal/of-thermal.c
53589+++ b/drivers/thermal/of-thermal.c
53590@@ -31,6 +31,7 @@
53591 #include <linux/export.h>
53592 #include <linux/string.h>
53593 #include <linux/thermal.h>
53594+#include <linux/mm.h>
53595
53596 #include "thermal_core.h"
53597
53598@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53599 tz->ops = ops;
53600 tz->sensor_data = data;
53601
53602- tzd->ops->get_temp = of_thermal_get_temp;
53603- tzd->ops->get_trend = of_thermal_get_trend;
53604- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53605+ pax_open_kernel();
53606+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53607+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53608+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53609+ pax_close_kernel();
53610 mutex_unlock(&tzd->lock);
53611
53612 return tzd;
53613@@ -544,9 +547,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53614 return;
53615
53616 mutex_lock(&tzd->lock);
53617- tzd->ops->get_temp = NULL;
53618- tzd->ops->get_trend = NULL;
53619- tzd->ops->set_emul_temp = NULL;
53620+ pax_open_kernel();
53621+ *(void **)&tzd->ops->get_temp = NULL;
53622+ *(void **)&tzd->ops->get_trend = NULL;
53623+ *(void **)&tzd->ops->set_emul_temp = NULL;
53624+ pax_close_kernel();
53625
53626 tz->ops = NULL;
53627 tz->sensor_data = NULL;
53628diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
53629index 9ea3d9d..53e8792 100644
53630--- a/drivers/thermal/x86_pkg_temp_thermal.c
53631+++ b/drivers/thermal/x86_pkg_temp_thermal.c
53632@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
53633 return NOTIFY_OK;
53634 }
53635
53636-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
53637+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
53638 .notifier_call = pkg_temp_thermal_cpu_callback,
53639 };
53640
53641diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53642index fd66f57..48e6376 100644
53643--- a/drivers/tty/cyclades.c
53644+++ b/drivers/tty/cyclades.c
53645@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53646 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53647 info->port.count);
53648 #endif
53649- info->port.count++;
53650+ atomic_inc(&info->port.count);
53651 #ifdef CY_DEBUG_COUNT
53652 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53653- current->pid, info->port.count);
53654+ current->pid, atomic_read(&info->port.count));
53655 #endif
53656
53657 /*
53658@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53659 for (j = 0; j < cy_card[i].nports; j++) {
53660 info = &cy_card[i].ports[j];
53661
53662- if (info->port.count) {
53663+ if (atomic_read(&info->port.count)) {
53664 /* XXX is the ldisc num worth this? */
53665 struct tty_struct *tty;
53666 struct tty_ldisc *ld;
53667diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
53668index 4fcec1d..5a036f7 100644
53669--- a/drivers/tty/hvc/hvc_console.c
53670+++ b/drivers/tty/hvc/hvc_console.c
53671@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
53672
53673 spin_lock_irqsave(&hp->port.lock, flags);
53674 /* Check and then increment for fast path open. */
53675- if (hp->port.count++ > 0) {
53676+ if (atomic_inc_return(&hp->port.count) > 1) {
53677 spin_unlock_irqrestore(&hp->port.lock, flags);
53678 hvc_kick();
53679 return 0;
53680@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53681
53682 spin_lock_irqsave(&hp->port.lock, flags);
53683
53684- if (--hp->port.count == 0) {
53685+ if (atomic_dec_return(&hp->port.count) == 0) {
53686 spin_unlock_irqrestore(&hp->port.lock, flags);
53687 /* We are done with the tty pointer now. */
53688 tty_port_tty_set(&hp->port, NULL);
53689@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53690 */
53691 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
53692 } else {
53693- if (hp->port.count < 0)
53694+ if (atomic_read(&hp->port.count) < 0)
53695 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
53696- hp->vtermno, hp->port.count);
53697+ hp->vtermno, atomic_read(&hp->port.count));
53698 spin_unlock_irqrestore(&hp->port.lock, flags);
53699 }
53700 }
53701@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
53702 * open->hangup case this can be called after the final close so prevent
53703 * that from happening for now.
53704 */
53705- if (hp->port.count <= 0) {
53706+ if (atomic_read(&hp->port.count) <= 0) {
53707 spin_unlock_irqrestore(&hp->port.lock, flags);
53708 return;
53709 }
53710
53711- hp->port.count = 0;
53712+ atomic_set(&hp->port.count, 0);
53713 spin_unlock_irqrestore(&hp->port.lock, flags);
53714 tty_port_tty_set(&hp->port, NULL);
53715
53716@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
53717 return -EPIPE;
53718
53719 /* FIXME what's this (unprotected) check for? */
53720- if (hp->port.count <= 0)
53721+ if (atomic_read(&hp->port.count) <= 0)
53722 return -EIO;
53723
53724 spin_lock_irqsave(&hp->lock, flags);
53725diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
53726index 81ff7e1..dfb7b71 100644
53727--- a/drivers/tty/hvc/hvcs.c
53728+++ b/drivers/tty/hvc/hvcs.c
53729@@ -83,6 +83,7 @@
53730 #include <asm/hvcserver.h>
53731 #include <asm/uaccess.h>
53732 #include <asm/vio.h>
53733+#include <asm/local.h>
53734
53735 /*
53736 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
53737@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
53738
53739 spin_lock_irqsave(&hvcsd->lock, flags);
53740
53741- if (hvcsd->port.count > 0) {
53742+ if (atomic_read(&hvcsd->port.count) > 0) {
53743 spin_unlock_irqrestore(&hvcsd->lock, flags);
53744 printk(KERN_INFO "HVCS: vterm state unchanged. "
53745 "The hvcs device node is still in use.\n");
53746@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
53747 }
53748 }
53749
53750- hvcsd->port.count = 0;
53751+ atomic_set(&hvcsd->port.count, 0);
53752 hvcsd->port.tty = tty;
53753 tty->driver_data = hvcsd;
53754
53755@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
53756 unsigned long flags;
53757
53758 spin_lock_irqsave(&hvcsd->lock, flags);
53759- hvcsd->port.count++;
53760+ atomic_inc(&hvcsd->port.count);
53761 hvcsd->todo_mask |= HVCS_SCHED_READ;
53762 spin_unlock_irqrestore(&hvcsd->lock, flags);
53763
53764@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53765 hvcsd = tty->driver_data;
53766
53767 spin_lock_irqsave(&hvcsd->lock, flags);
53768- if (--hvcsd->port.count == 0) {
53769+ if (atomic_dec_and_test(&hvcsd->port.count)) {
53770
53771 vio_disable_interrupts(hvcsd->vdev);
53772
53773@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53774
53775 free_irq(irq, hvcsd);
53776 return;
53777- } else if (hvcsd->port.count < 0) {
53778+ } else if (atomic_read(&hvcsd->port.count) < 0) {
53779 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
53780 " is missmanaged.\n",
53781- hvcsd->vdev->unit_address, hvcsd->port.count);
53782+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
53783 }
53784
53785 spin_unlock_irqrestore(&hvcsd->lock, flags);
53786@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53787
53788 spin_lock_irqsave(&hvcsd->lock, flags);
53789 /* Preserve this so that we know how many kref refs to put */
53790- temp_open_count = hvcsd->port.count;
53791+ temp_open_count = atomic_read(&hvcsd->port.count);
53792
53793 /*
53794 * Don't kref put inside the spinlock because the destruction
53795@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53796 tty->driver_data = NULL;
53797 hvcsd->port.tty = NULL;
53798
53799- hvcsd->port.count = 0;
53800+ atomic_set(&hvcsd->port.count, 0);
53801
53802 /* This will drop any buffered data on the floor which is OK in a hangup
53803 * scenario. */
53804@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
53805 * the middle of a write operation? This is a crummy place to do this
53806 * but we want to keep it all in the spinlock.
53807 */
53808- if (hvcsd->port.count <= 0) {
53809+ if (atomic_read(&hvcsd->port.count) <= 0) {
53810 spin_unlock_irqrestore(&hvcsd->lock, flags);
53811 return -ENODEV;
53812 }
53813@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
53814 {
53815 struct hvcs_struct *hvcsd = tty->driver_data;
53816
53817- if (!hvcsd || hvcsd->port.count <= 0)
53818+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
53819 return 0;
53820
53821 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
53822diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
53823index 4190199..06d5bfa 100644
53824--- a/drivers/tty/hvc/hvsi.c
53825+++ b/drivers/tty/hvc/hvsi.c
53826@@ -85,7 +85,7 @@ struct hvsi_struct {
53827 int n_outbuf;
53828 uint32_t vtermno;
53829 uint32_t virq;
53830- atomic_t seqno; /* HVSI packet sequence number */
53831+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
53832 uint16_t mctrl;
53833 uint8_t state; /* HVSI protocol state */
53834 uint8_t flags;
53835@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
53836
53837 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
53838 packet.hdr.len = sizeof(struct hvsi_query_response);
53839- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53840+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53841 packet.verb = VSV_SEND_VERSION_NUMBER;
53842 packet.u.version = HVSI_VERSION;
53843 packet.query_seqno = query_seqno+1;
53844@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
53845
53846 packet.hdr.type = VS_QUERY_PACKET_HEADER;
53847 packet.hdr.len = sizeof(struct hvsi_query);
53848- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53849+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53850 packet.verb = verb;
53851
53852 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
53853@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
53854 int wrote;
53855
53856 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
53857- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53858+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53859 packet.hdr.len = sizeof(struct hvsi_control);
53860 packet.verb = VSV_SET_MODEM_CTL;
53861 packet.mask = HVSI_TSDTR;
53862@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
53863 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
53864
53865 packet.hdr.type = VS_DATA_PACKET_HEADER;
53866- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53867+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53868 packet.hdr.len = count + sizeof(struct hvsi_header);
53869 memcpy(&packet.data, buf, count);
53870
53871@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
53872 struct hvsi_control packet __ALIGNED__;
53873
53874 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
53875- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53876+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53877 packet.hdr.len = 6;
53878 packet.verb = VSV_CLOSE_PROTOCOL;
53879
53880@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53881
53882 tty_port_tty_set(&hp->port, tty);
53883 spin_lock_irqsave(&hp->lock, flags);
53884- hp->port.count++;
53885+ atomic_inc(&hp->port.count);
53886 atomic_set(&hp->seqno, 0);
53887 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53888 spin_unlock_irqrestore(&hp->lock, flags);
53889@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53890
53891 spin_lock_irqsave(&hp->lock, flags);
53892
53893- if (--hp->port.count == 0) {
53894+ if (atomic_dec_return(&hp->port.count) == 0) {
53895 tty_port_tty_set(&hp->port, NULL);
53896 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53897
53898@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53899
53900 spin_lock_irqsave(&hp->lock, flags);
53901 }
53902- } else if (hp->port.count < 0)
53903+ } else if (atomic_read(&hp->port.count) < 0)
53904 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53905- hp - hvsi_ports, hp->port.count);
53906+ hp - hvsi_ports, atomic_read(&hp->port.count));
53907
53908 spin_unlock_irqrestore(&hp->lock, flags);
53909 }
53910@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53911 tty_port_tty_set(&hp->port, NULL);
53912
53913 spin_lock_irqsave(&hp->lock, flags);
53914- hp->port.count = 0;
53915+ atomic_set(&hp->port.count, 0);
53916 hp->n_outbuf = 0;
53917 spin_unlock_irqrestore(&hp->lock, flags);
53918 }
53919diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53920index a270f04..7c77b5d 100644
53921--- a/drivers/tty/hvc/hvsi_lib.c
53922+++ b/drivers/tty/hvc/hvsi_lib.c
53923@@ -8,7 +8,7 @@
53924
53925 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53926 {
53927- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53928+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53929
53930 /* Assumes that always succeeds, works in practice */
53931 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53932@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53933
53934 /* Reset state */
53935 pv->established = 0;
53936- atomic_set(&pv->seqno, 0);
53937+ atomic_set_unchecked(&pv->seqno, 0);
53938
53939 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53940
53941diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53942index 345cebb..d5a1e9e 100644
53943--- a/drivers/tty/ipwireless/tty.c
53944+++ b/drivers/tty/ipwireless/tty.c
53945@@ -28,6 +28,7 @@
53946 #include <linux/tty_driver.h>
53947 #include <linux/tty_flip.h>
53948 #include <linux/uaccess.h>
53949+#include <asm/local.h>
53950
53951 #include "tty.h"
53952 #include "network.h"
53953@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53954 return -ENODEV;
53955
53956 mutex_lock(&tty->ipw_tty_mutex);
53957- if (tty->port.count == 0)
53958+ if (atomic_read(&tty->port.count) == 0)
53959 tty->tx_bytes_queued = 0;
53960
53961- tty->port.count++;
53962+ atomic_inc(&tty->port.count);
53963
53964 tty->port.tty = linux_tty;
53965 linux_tty->driver_data = tty;
53966@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53967
53968 static void do_ipw_close(struct ipw_tty *tty)
53969 {
53970- tty->port.count--;
53971-
53972- if (tty->port.count == 0) {
53973+ if (atomic_dec_return(&tty->port.count) == 0) {
53974 struct tty_struct *linux_tty = tty->port.tty;
53975
53976 if (linux_tty != NULL) {
53977@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53978 return;
53979
53980 mutex_lock(&tty->ipw_tty_mutex);
53981- if (tty->port.count == 0) {
53982+ if (atomic_read(&tty->port.count) == 0) {
53983 mutex_unlock(&tty->ipw_tty_mutex);
53984 return;
53985 }
53986@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53987
53988 mutex_lock(&tty->ipw_tty_mutex);
53989
53990- if (!tty->port.count) {
53991+ if (!atomic_read(&tty->port.count)) {
53992 mutex_unlock(&tty->ipw_tty_mutex);
53993 return;
53994 }
53995@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53996 return -ENODEV;
53997
53998 mutex_lock(&tty->ipw_tty_mutex);
53999- if (!tty->port.count) {
54000+ if (!atomic_read(&tty->port.count)) {
54001 mutex_unlock(&tty->ipw_tty_mutex);
54002 return -EINVAL;
54003 }
54004@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54005 if (!tty)
54006 return -ENODEV;
54007
54008- if (!tty->port.count)
54009+ if (!atomic_read(&tty->port.count))
54010 return -EINVAL;
54011
54012 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54013@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54014 if (!tty)
54015 return 0;
54016
54017- if (!tty->port.count)
54018+ if (!atomic_read(&tty->port.count))
54019 return 0;
54020
54021 return tty->tx_bytes_queued;
54022@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54023 if (!tty)
54024 return -ENODEV;
54025
54026- if (!tty->port.count)
54027+ if (!atomic_read(&tty->port.count))
54028 return -EINVAL;
54029
54030 return get_control_lines(tty);
54031@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54032 if (!tty)
54033 return -ENODEV;
54034
54035- if (!tty->port.count)
54036+ if (!atomic_read(&tty->port.count))
54037 return -EINVAL;
54038
54039 return set_control_lines(tty, set, clear);
54040@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54041 if (!tty)
54042 return -ENODEV;
54043
54044- if (!tty->port.count)
54045+ if (!atomic_read(&tty->port.count))
54046 return -EINVAL;
54047
54048 /* FIXME: Exactly how is the tty object locked here .. */
54049@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54050 * are gone */
54051 mutex_lock(&ttyj->ipw_tty_mutex);
54052 }
54053- while (ttyj->port.count)
54054+ while (atomic_read(&ttyj->port.count))
54055 do_ipw_close(ttyj);
54056 ipwireless_disassociate_network_ttys(network,
54057 ttyj->channel_idx);
54058diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54059index 14c54e0..1efd4f2 100644
54060--- a/drivers/tty/moxa.c
54061+++ b/drivers/tty/moxa.c
54062@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54063 }
54064
54065 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54066- ch->port.count++;
54067+ atomic_inc(&ch->port.count);
54068 tty->driver_data = ch;
54069 tty_port_tty_set(&ch->port, tty);
54070 mutex_lock(&ch->port.mutex);
54071diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54072index c434376..114ce13 100644
54073--- a/drivers/tty/n_gsm.c
54074+++ b/drivers/tty/n_gsm.c
54075@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54076 spin_lock_init(&dlci->lock);
54077 mutex_init(&dlci->mutex);
54078 dlci->fifo = &dlci->_fifo;
54079- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54080+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54081 kfree(dlci);
54082 return NULL;
54083 }
54084@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54085 struct gsm_dlci *dlci = tty->driver_data;
54086 struct tty_port *port = &dlci->port;
54087
54088- port->count++;
54089+ atomic_inc(&port->count);
54090 tty_port_tty_set(port, tty);
54091
54092 dlci->modem_rx = 0;
54093diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54094index cf6e0f2..4283167 100644
54095--- a/drivers/tty/n_tty.c
54096+++ b/drivers/tty/n_tty.c
54097@@ -116,7 +116,7 @@ struct n_tty_data {
54098 int minimum_to_wake;
54099
54100 /* consumer-published */
54101- size_t read_tail;
54102+ size_t read_tail __intentional_overflow(-1);
54103 size_t line_start;
54104
54105 /* protected by output lock */
54106@@ -2547,6 +2547,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54107 {
54108 *ops = tty_ldisc_N_TTY;
54109 ops->owner = NULL;
54110- ops->refcount = ops->flags = 0;
54111+ atomic_set(&ops->refcount, 0);
54112+ ops->flags = 0;
54113 }
54114 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54115diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54116index e72ee62..d977ad9 100644
54117--- a/drivers/tty/pty.c
54118+++ b/drivers/tty/pty.c
54119@@ -848,8 +848,10 @@ static void __init unix98_pty_init(void)
54120 panic("Couldn't register Unix98 pts driver");
54121
54122 /* Now create the /dev/ptmx special device */
54123+ pax_open_kernel();
54124 tty_default_fops(&ptmx_fops);
54125- ptmx_fops.open = ptmx_open;
54126+ *(void **)&ptmx_fops.open = ptmx_open;
54127+ pax_close_kernel();
54128
54129 cdev_init(&ptmx_cdev, &ptmx_fops);
54130 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54131diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54132index c8dd8dc..dca6cfd 100644
54133--- a/drivers/tty/rocket.c
54134+++ b/drivers/tty/rocket.c
54135@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54136 tty->driver_data = info;
54137 tty_port_tty_set(port, tty);
54138
54139- if (port->count++ == 0) {
54140+ if (atomic_inc_return(&port->count) == 1) {
54141 atomic_inc(&rp_num_ports_open);
54142
54143 #ifdef ROCKET_DEBUG_OPEN
54144@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54145 #endif
54146 }
54147 #ifdef ROCKET_DEBUG_OPEN
54148- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54149+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
54150 #endif
54151
54152 /*
54153@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54154 spin_unlock_irqrestore(&info->port.lock, flags);
54155 return;
54156 }
54157- if (info->port.count)
54158+ if (atomic_read(&info->port.count))
54159 atomic_dec(&rp_num_ports_open);
54160 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54161 spin_unlock_irqrestore(&info->port.lock, flags);
54162diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54163index aa28209..e08fb85 100644
54164--- a/drivers/tty/serial/ioc4_serial.c
54165+++ b/drivers/tty/serial/ioc4_serial.c
54166@@ -437,7 +437,7 @@ struct ioc4_soft {
54167 } is_intr_info[MAX_IOC4_INTR_ENTS];
54168
54169 /* Number of entries active in the above array */
54170- atomic_t is_num_intrs;
54171+ atomic_unchecked_t is_num_intrs;
54172 } is_intr_type[IOC4_NUM_INTR_TYPES];
54173
54174 /* is_ir_lock must be held while
54175@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54176 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54177 || (type == IOC4_OTHER_INTR_TYPE)));
54178
54179- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54180+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54181 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54182
54183 /* Save off the lower level interrupt handler */
54184@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54185
54186 soft = arg;
54187 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54188- num_intrs = (int)atomic_read(
54189+ num_intrs = (int)atomic_read_unchecked(
54190 &soft->is_intr_type[intr_type].is_num_intrs);
54191
54192 this_mir = this_ir = pending_intrs(soft, intr_type);
54193diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54194index 129dc5b..1da5bb8 100644
54195--- a/drivers/tty/serial/kgdb_nmi.c
54196+++ b/drivers/tty/serial/kgdb_nmi.c
54197@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54198 * I/O utilities that messages sent to the console will automatically
54199 * be displayed on the dbg_io.
54200 */
54201- dbg_io_ops->is_console = true;
54202+ pax_open_kernel();
54203+ *(int *)&dbg_io_ops->is_console = true;
54204+ pax_close_kernel();
54205
54206 return 0;
54207 }
54208diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54209index a260cde..6b2b5ce 100644
54210--- a/drivers/tty/serial/kgdboc.c
54211+++ b/drivers/tty/serial/kgdboc.c
54212@@ -24,8 +24,9 @@
54213 #define MAX_CONFIG_LEN 40
54214
54215 static struct kgdb_io kgdboc_io_ops;
54216+static struct kgdb_io kgdboc_io_ops_console;
54217
54218-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54219+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54220 static int configured = -1;
54221
54222 static char config[MAX_CONFIG_LEN];
54223@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54224 kgdboc_unregister_kbd();
54225 if (configured == 1)
54226 kgdb_unregister_io_module(&kgdboc_io_ops);
54227+ else if (configured == 2)
54228+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54229 }
54230
54231 static int configure_kgdboc(void)
54232@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54233 int err;
54234 char *cptr = config;
54235 struct console *cons;
54236+ int is_console = 0;
54237
54238 err = kgdboc_option_setup(config);
54239 if (err || !strlen(config) || isspace(config[0]))
54240 goto noconfig;
54241
54242 err = -ENODEV;
54243- kgdboc_io_ops.is_console = 0;
54244 kgdb_tty_driver = NULL;
54245
54246 kgdboc_use_kms = 0;
54247@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54248 int idx;
54249 if (cons->device && cons->device(cons, &idx) == p &&
54250 idx == tty_line) {
54251- kgdboc_io_ops.is_console = 1;
54252+ is_console = 1;
54253 break;
54254 }
54255 cons = cons->next;
54256@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54257 kgdb_tty_line = tty_line;
54258
54259 do_register:
54260- err = kgdb_register_io_module(&kgdboc_io_ops);
54261+ if (is_console) {
54262+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54263+ configured = 2;
54264+ } else {
54265+ err = kgdb_register_io_module(&kgdboc_io_ops);
54266+ configured = 1;
54267+ }
54268 if (err)
54269 goto noconfig;
54270
54271@@ -205,8 +214,6 @@ do_register:
54272 if (err)
54273 goto nmi_con_failed;
54274
54275- configured = 1;
54276-
54277 return 0;
54278
54279 nmi_con_failed:
54280@@ -223,7 +230,7 @@ noconfig:
54281 static int __init init_kgdboc(void)
54282 {
54283 /* Already configured? */
54284- if (configured == 1)
54285+ if (configured >= 1)
54286 return 0;
54287
54288 return configure_kgdboc();
54289@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54290 if (config[len - 1] == '\n')
54291 config[len - 1] = '\0';
54292
54293- if (configured == 1)
54294+ if (configured >= 1)
54295 cleanup_kgdboc();
54296
54297 /* Go and configure with the new params. */
54298@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54299 .post_exception = kgdboc_post_exp_handler,
54300 };
54301
54302+static struct kgdb_io kgdboc_io_ops_console = {
54303+ .name = "kgdboc",
54304+ .read_char = kgdboc_get_char,
54305+ .write_char = kgdboc_put_char,
54306+ .pre_exception = kgdboc_pre_exp_handler,
54307+ .post_exception = kgdboc_post_exp_handler,
54308+ .is_console = 1
54309+};
54310+
54311 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54312 /* This is only available if kgdboc is a built in for early debugging */
54313 static int __init kgdboc_early_init(char *opt)
54314diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54315index b73889c..9f74f0a 100644
54316--- a/drivers/tty/serial/msm_serial.c
54317+++ b/drivers/tty/serial/msm_serial.c
54318@@ -1012,7 +1012,7 @@ static struct uart_driver msm_uart_driver = {
54319 .cons = MSM_CONSOLE,
54320 };
54321
54322-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54323+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54324
54325 static const struct of_device_id msm_uartdm_table[] = {
54326 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54327@@ -1036,7 +1036,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54328 line = pdev->id;
54329
54330 if (line < 0)
54331- line = atomic_inc_return(&msm_uart_next_id) - 1;
54332+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54333
54334 if (unlikely(line < 0 || line >= UART_NR))
54335 return -ENXIO;
54336diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54337index cf08876..711e0bf 100644
54338--- a/drivers/tty/serial/samsung.c
54339+++ b/drivers/tty/serial/samsung.c
54340@@ -987,11 +987,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54341 ourport->tx_in_progress = 0;
54342 }
54343
54344+static int s3c64xx_serial_startup(struct uart_port *port);
54345 static int s3c24xx_serial_startup(struct uart_port *port)
54346 {
54347 struct s3c24xx_uart_port *ourport = to_ourport(port);
54348 int ret;
54349
54350+ /* Startup sequence is different for s3c64xx and higher SoC's */
54351+ if (s3c24xx_serial_has_interrupt_mask(port))
54352+ return s3c64xx_serial_startup(port);
54353+
54354 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54355 port, (unsigned long long)port->mapbase, port->membase);
54356
54357@@ -1697,10 +1702,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54358 /* setup info for port */
54359 port->dev = &platdev->dev;
54360
54361- /* Startup sequence is different for s3c64xx and higher SoC's */
54362- if (s3c24xx_serial_has_interrupt_mask(port))
54363- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54364-
54365 port->uartclk = 1;
54366
54367 if (cfg->uart_flags & UPF_CONS_FLOW) {
54368diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54369index 6a1055a..5ca9ad9 100644
54370--- a/drivers/tty/serial/serial_core.c
54371+++ b/drivers/tty/serial/serial_core.c
54372@@ -1377,7 +1377,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54373 state = drv->state + tty->index;
54374 port = &state->port;
54375 spin_lock_irq(&port->lock);
54376- --port->count;
54377+ atomic_dec(&port->count);
54378 spin_unlock_irq(&port->lock);
54379 return;
54380 }
54381@@ -1387,7 +1387,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54382
54383 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54384
54385- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54386+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54387 return;
54388
54389 /*
54390@@ -1511,7 +1511,7 @@ static void uart_hangup(struct tty_struct *tty)
54391 uart_flush_buffer(tty);
54392 uart_shutdown(tty, state);
54393 spin_lock_irqsave(&port->lock, flags);
54394- port->count = 0;
54395+ atomic_set(&port->count, 0);
54396 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54397 spin_unlock_irqrestore(&port->lock, flags);
54398 tty_port_tty_set(port, NULL);
54399@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54400 pr_debug("uart_open(%d) called\n", line);
54401
54402 spin_lock_irq(&port->lock);
54403- ++port->count;
54404+ atomic_inc(&port->count);
54405 spin_unlock_irq(&port->lock);
54406
54407 /*
54408diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54409index b799170..87dafd5 100644
54410--- a/drivers/tty/synclink.c
54411+++ b/drivers/tty/synclink.c
54412@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54413
54414 if (debug_level >= DEBUG_LEVEL_INFO)
54415 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54416- __FILE__,__LINE__, info->device_name, info->port.count);
54417+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54418
54419 if (tty_port_close_start(&info->port, tty, filp) == 0)
54420 goto cleanup;
54421@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54422 cleanup:
54423 if (debug_level >= DEBUG_LEVEL_INFO)
54424 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54425- tty->driver->name, info->port.count);
54426+ tty->driver->name, atomic_read(&info->port.count));
54427
54428 } /* end of mgsl_close() */
54429
54430@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54431
54432 mgsl_flush_buffer(tty);
54433 shutdown(info);
54434-
54435- info->port.count = 0;
54436+
54437+ atomic_set(&info->port.count, 0);
54438 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54439 info->port.tty = NULL;
54440
54441@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54442
54443 if (debug_level >= DEBUG_LEVEL_INFO)
54444 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54445- __FILE__,__LINE__, tty->driver->name, port->count );
54446+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54447
54448 spin_lock_irqsave(&info->irq_spinlock, flags);
54449- port->count--;
54450+ atomic_dec(&port->count);
54451 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54452 port->blocked_open++;
54453
54454@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54455
54456 if (debug_level >= DEBUG_LEVEL_INFO)
54457 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54458- __FILE__,__LINE__, tty->driver->name, port->count );
54459+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54460
54461 tty_unlock(tty);
54462 schedule();
54463@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54464
54465 /* FIXME: Racy on hangup during close wait */
54466 if (!tty_hung_up_p(filp))
54467- port->count++;
54468+ atomic_inc(&port->count);
54469 port->blocked_open--;
54470
54471 if (debug_level >= DEBUG_LEVEL_INFO)
54472 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54473- __FILE__,__LINE__, tty->driver->name, port->count );
54474+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54475
54476 if (!retval)
54477 port->flags |= ASYNC_NORMAL_ACTIVE;
54478@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54479
54480 if (debug_level >= DEBUG_LEVEL_INFO)
54481 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54482- __FILE__,__LINE__,tty->driver->name, info->port.count);
54483+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54484
54485 /* If port is closing, signal caller to try again */
54486 if (info->port.flags & ASYNC_CLOSING){
54487@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54488 spin_unlock_irqrestore(&info->netlock, flags);
54489 goto cleanup;
54490 }
54491- info->port.count++;
54492+ atomic_inc(&info->port.count);
54493 spin_unlock_irqrestore(&info->netlock, flags);
54494
54495- if (info->port.count == 1) {
54496+ if (atomic_read(&info->port.count) == 1) {
54497 /* 1st open on this device, init hardware */
54498 retval = startup(info);
54499 if (retval < 0)
54500@@ -3442,8 +3442,8 @@ cleanup:
54501 if (retval) {
54502 if (tty->count == 1)
54503 info->port.tty = NULL; /* tty layer will release tty struct */
54504- if(info->port.count)
54505- info->port.count--;
54506+ if (atomic_read(&info->port.count))
54507+ atomic_dec(&info->port.count);
54508 }
54509
54510 return retval;
54511@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54512 unsigned short new_crctype;
54513
54514 /* return error if TTY interface open */
54515- if (info->port.count)
54516+ if (atomic_read(&info->port.count))
54517 return -EBUSY;
54518
54519 switch (encoding)
54520@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
54521
54522 /* arbitrate between network and tty opens */
54523 spin_lock_irqsave(&info->netlock, flags);
54524- if (info->port.count != 0 || info->netcount != 0) {
54525+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54526 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54527 spin_unlock_irqrestore(&info->netlock, flags);
54528 return -EBUSY;
54529@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54530 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54531
54532 /* return error if TTY interface open */
54533- if (info->port.count)
54534+ if (atomic_read(&info->port.count))
54535 return -EBUSY;
54536
54537 if (cmd != SIOCWANDEV)
54538diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54539index 0e8c39b..e0cb171 100644
54540--- a/drivers/tty/synclink_gt.c
54541+++ b/drivers/tty/synclink_gt.c
54542@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54543 tty->driver_data = info;
54544 info->port.tty = tty;
54545
54546- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54547+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54548
54549 /* If port is closing, signal caller to try again */
54550 if (info->port.flags & ASYNC_CLOSING){
54551@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54552 mutex_unlock(&info->port.mutex);
54553 goto cleanup;
54554 }
54555- info->port.count++;
54556+ atomic_inc(&info->port.count);
54557 spin_unlock_irqrestore(&info->netlock, flags);
54558
54559- if (info->port.count == 1) {
54560+ if (atomic_read(&info->port.count) == 1) {
54561 /* 1st open on this device, init hardware */
54562 retval = startup(info);
54563 if (retval < 0) {
54564@@ -715,8 +715,8 @@ cleanup:
54565 if (retval) {
54566 if (tty->count == 1)
54567 info->port.tty = NULL; /* tty layer will release tty struct */
54568- if(info->port.count)
54569- info->port.count--;
54570+ if(atomic_read(&info->port.count))
54571+ atomic_dec(&info->port.count);
54572 }
54573
54574 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54575@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54576
54577 if (sanity_check(info, tty->name, "close"))
54578 return;
54579- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54580+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54581
54582 if (tty_port_close_start(&info->port, tty, filp) == 0)
54583 goto cleanup;
54584@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54585 tty_port_close_end(&info->port, tty);
54586 info->port.tty = NULL;
54587 cleanup:
54588- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54589+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54590 }
54591
54592 static void hangup(struct tty_struct *tty)
54593@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54594 shutdown(info);
54595
54596 spin_lock_irqsave(&info->port.lock, flags);
54597- info->port.count = 0;
54598+ atomic_set(&info->port.count, 0);
54599 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54600 info->port.tty = NULL;
54601 spin_unlock_irqrestore(&info->port.lock, flags);
54602@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54603 unsigned short new_crctype;
54604
54605 /* return error if TTY interface open */
54606- if (info->port.count)
54607+ if (atomic_read(&info->port.count))
54608 return -EBUSY;
54609
54610 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54611@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54612
54613 /* arbitrate between network and tty opens */
54614 spin_lock_irqsave(&info->netlock, flags);
54615- if (info->port.count != 0 || info->netcount != 0) {
54616+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54617 DBGINFO(("%s hdlc_open busy\n", dev->name));
54618 spin_unlock_irqrestore(&info->netlock, flags);
54619 return -EBUSY;
54620@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54621 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54622
54623 /* return error if TTY interface open */
54624- if (info->port.count)
54625+ if (atomic_read(&info->port.count))
54626 return -EBUSY;
54627
54628 if (cmd != SIOCWANDEV)
54629@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54630 if (port == NULL)
54631 continue;
54632 spin_lock(&port->lock);
54633- if ((port->port.count || port->netcount) &&
54634+ if ((atomic_read(&port->port.count) || port->netcount) &&
54635 port->pending_bh && !port->bh_running &&
54636 !port->bh_requested) {
54637 DBGISR(("%s bh queued\n", port->device_name));
54638@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54639 add_wait_queue(&port->open_wait, &wait);
54640
54641 spin_lock_irqsave(&info->lock, flags);
54642- port->count--;
54643+ atomic_dec(&port->count);
54644 spin_unlock_irqrestore(&info->lock, flags);
54645 port->blocked_open++;
54646
54647@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54648 remove_wait_queue(&port->open_wait, &wait);
54649
54650 if (!tty_hung_up_p(filp))
54651- port->count++;
54652+ atomic_inc(&port->count);
54653 port->blocked_open--;
54654
54655 if (!retval)
54656diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54657index c3f9091..abe4601 100644
54658--- a/drivers/tty/synclinkmp.c
54659+++ b/drivers/tty/synclinkmp.c
54660@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54661
54662 if (debug_level >= DEBUG_LEVEL_INFO)
54663 printk("%s(%d):%s open(), old ref count = %d\n",
54664- __FILE__,__LINE__,tty->driver->name, info->port.count);
54665+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54666
54667 /* If port is closing, signal caller to try again */
54668 if (info->port.flags & ASYNC_CLOSING){
54669@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54670 spin_unlock_irqrestore(&info->netlock, flags);
54671 goto cleanup;
54672 }
54673- info->port.count++;
54674+ atomic_inc(&info->port.count);
54675 spin_unlock_irqrestore(&info->netlock, flags);
54676
54677- if (info->port.count == 1) {
54678+ if (atomic_read(&info->port.count) == 1) {
54679 /* 1st open on this device, init hardware */
54680 retval = startup(info);
54681 if (retval < 0)
54682@@ -796,8 +796,8 @@ cleanup:
54683 if (retval) {
54684 if (tty->count == 1)
54685 info->port.tty = NULL; /* tty layer will release tty struct */
54686- if(info->port.count)
54687- info->port.count--;
54688+ if(atomic_read(&info->port.count))
54689+ atomic_dec(&info->port.count);
54690 }
54691
54692 return retval;
54693@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54694
54695 if (debug_level >= DEBUG_LEVEL_INFO)
54696 printk("%s(%d):%s close() entry, count=%d\n",
54697- __FILE__,__LINE__, info->device_name, info->port.count);
54698+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54699
54700 if (tty_port_close_start(&info->port, tty, filp) == 0)
54701 goto cleanup;
54702@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54703 cleanup:
54704 if (debug_level >= DEBUG_LEVEL_INFO)
54705 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
54706- tty->driver->name, info->port.count);
54707+ tty->driver->name, atomic_read(&info->port.count));
54708 }
54709
54710 /* Called by tty_hangup() when a hangup is signaled.
54711@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
54712 shutdown(info);
54713
54714 spin_lock_irqsave(&info->port.lock, flags);
54715- info->port.count = 0;
54716+ atomic_set(&info->port.count, 0);
54717 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54718 info->port.tty = NULL;
54719 spin_unlock_irqrestore(&info->port.lock, flags);
54720@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54721 unsigned short new_crctype;
54722
54723 /* return error if TTY interface open */
54724- if (info->port.count)
54725+ if (atomic_read(&info->port.count))
54726 return -EBUSY;
54727
54728 switch (encoding)
54729@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
54730
54731 /* arbitrate between network and tty opens */
54732 spin_lock_irqsave(&info->netlock, flags);
54733- if (info->port.count != 0 || info->netcount != 0) {
54734+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54735 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54736 spin_unlock_irqrestore(&info->netlock, flags);
54737 return -EBUSY;
54738@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54739 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54740
54741 /* return error if TTY interface open */
54742- if (info->port.count)
54743+ if (atomic_read(&info->port.count))
54744 return -EBUSY;
54745
54746 if (cmd != SIOCWANDEV)
54747@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
54748 * do not request bottom half processing if the
54749 * device is not open in a normal mode.
54750 */
54751- if ( port && (port->port.count || port->netcount) &&
54752+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
54753 port->pending_bh && !port->bh_running &&
54754 !port->bh_requested ) {
54755 if ( debug_level >= DEBUG_LEVEL_ISR )
54756@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54757
54758 if (debug_level >= DEBUG_LEVEL_INFO)
54759 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
54760- __FILE__,__LINE__, tty->driver->name, port->count );
54761+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54762
54763 spin_lock_irqsave(&info->lock, flags);
54764- port->count--;
54765+ atomic_dec(&port->count);
54766 spin_unlock_irqrestore(&info->lock, flags);
54767 port->blocked_open++;
54768
54769@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54770
54771 if (debug_level >= DEBUG_LEVEL_INFO)
54772 printk("%s(%d):%s block_til_ready() count=%d\n",
54773- __FILE__,__LINE__, tty->driver->name, port->count );
54774+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54775
54776 tty_unlock(tty);
54777 schedule();
54778@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54779 set_current_state(TASK_RUNNING);
54780 remove_wait_queue(&port->open_wait, &wait);
54781 if (!tty_hung_up_p(filp))
54782- port->count++;
54783+ atomic_inc(&port->count);
54784 port->blocked_open--;
54785
54786 if (debug_level >= DEBUG_LEVEL_INFO)
54787 printk("%s(%d):%s block_til_ready() after, count=%d\n",
54788- __FILE__,__LINE__, tty->driver->name, port->count );
54789+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54790
54791 if (!retval)
54792 port->flags |= ASYNC_NORMAL_ACTIVE;
54793diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
54794index 259a4d5..9b0c9e7 100644
54795--- a/drivers/tty/sysrq.c
54796+++ b/drivers/tty/sysrq.c
54797@@ -1085,7 +1085,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
54798 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
54799 size_t count, loff_t *ppos)
54800 {
54801- if (count) {
54802+ if (count && capable(CAP_SYS_ADMIN)) {
54803 char c;
54804
54805 if (get_user(c, buf))
54806diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
54807index 2bb4dfc..a7f6e86 100644
54808--- a/drivers/tty/tty_io.c
54809+++ b/drivers/tty/tty_io.c
54810@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
54811
54812 void tty_default_fops(struct file_operations *fops)
54813 {
54814- *fops = tty_fops;
54815+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
54816 }
54817
54818 /*
54819diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
54820index 3737f55..7cef448 100644
54821--- a/drivers/tty/tty_ldisc.c
54822+++ b/drivers/tty/tty_ldisc.c
54823@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
54824 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54825 tty_ldiscs[disc] = new_ldisc;
54826 new_ldisc->num = disc;
54827- new_ldisc->refcount = 0;
54828+ atomic_set(&new_ldisc->refcount, 0);
54829 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54830
54831 return ret;
54832@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
54833 return -EINVAL;
54834
54835 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54836- if (tty_ldiscs[disc]->refcount)
54837+ if (atomic_read(&tty_ldiscs[disc]->refcount))
54838 ret = -EBUSY;
54839 else
54840 tty_ldiscs[disc] = NULL;
54841@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
54842 if (ldops) {
54843 ret = ERR_PTR(-EAGAIN);
54844 if (try_module_get(ldops->owner)) {
54845- ldops->refcount++;
54846+ atomic_inc(&ldops->refcount);
54847 ret = ldops;
54848 }
54849 }
54850@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
54851 unsigned long flags;
54852
54853 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54854- ldops->refcount--;
54855+ atomic_dec(&ldops->refcount);
54856 module_put(ldops->owner);
54857 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54858 }
54859diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
54860index 40b31835..94d92ae 100644
54861--- a/drivers/tty/tty_port.c
54862+++ b/drivers/tty/tty_port.c
54863@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
54864 unsigned long flags;
54865
54866 spin_lock_irqsave(&port->lock, flags);
54867- port->count = 0;
54868+ atomic_set(&port->count, 0);
54869 port->flags &= ~ASYNC_NORMAL_ACTIVE;
54870 tty = port->tty;
54871 if (tty)
54872@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54873
54874 /* The port lock protects the port counts */
54875 spin_lock_irqsave(&port->lock, flags);
54876- port->count--;
54877+ atomic_dec(&port->count);
54878 port->blocked_open++;
54879 spin_unlock_irqrestore(&port->lock, flags);
54880
54881@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54882 we must not mess that up further */
54883 spin_lock_irqsave(&port->lock, flags);
54884 if (!tty_hung_up_p(filp))
54885- port->count++;
54886+ atomic_inc(&port->count);
54887 port->blocked_open--;
54888 if (retval == 0)
54889 port->flags |= ASYNC_NORMAL_ACTIVE;
54890@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
54891 return 0;
54892
54893 spin_lock_irqsave(&port->lock, flags);
54894- if (tty->count == 1 && port->count != 1) {
54895+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54896 printk(KERN_WARNING
54897 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54898- port->count);
54899- port->count = 1;
54900+ atomic_read(&port->count));
54901+ atomic_set(&port->count, 1);
54902 }
54903- if (--port->count < 0) {
54904+ if (atomic_dec_return(&port->count) < 0) {
54905 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54906- port->count);
54907- port->count = 0;
54908+ atomic_read(&port->count));
54909+ atomic_set(&port->count, 0);
54910 }
54911
54912- if (port->count) {
54913+ if (atomic_read(&port->count)) {
54914 spin_unlock_irqrestore(&port->lock, flags);
54915 return 0;
54916 }
54917@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54918 struct file *filp)
54919 {
54920 spin_lock_irq(&port->lock);
54921- ++port->count;
54922+ atomic_inc(&port->count);
54923 spin_unlock_irq(&port->lock);
54924 tty_port_tty_set(port, tty);
54925
54926diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54927index 8a89f6e..50b32af 100644
54928--- a/drivers/tty/vt/keyboard.c
54929+++ b/drivers/tty/vt/keyboard.c
54930@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54931 kbd->kbdmode == VC_OFF) &&
54932 value != KVAL(K_SAK))
54933 return; /* SAK is allowed even in raw mode */
54934+
54935+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54936+ {
54937+ void *func = fn_handler[value];
54938+ if (func == fn_show_state || func == fn_show_ptregs ||
54939+ func == fn_show_mem)
54940+ return;
54941+ }
54942+#endif
54943+
54944 fn_handler[value](vc);
54945 }
54946
54947@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54948 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54949 return -EFAULT;
54950
54951- if (!capable(CAP_SYS_TTY_CONFIG))
54952- perm = 0;
54953-
54954 switch (cmd) {
54955 case KDGKBENT:
54956 /* Ensure another thread doesn't free it under us */
54957@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54958 spin_unlock_irqrestore(&kbd_event_lock, flags);
54959 return put_user(val, &user_kbe->kb_value);
54960 case KDSKBENT:
54961+ if (!capable(CAP_SYS_TTY_CONFIG))
54962+ perm = 0;
54963+
54964 if (!perm)
54965 return -EPERM;
54966 if (!i && v == K_NOSUCHMAP) {
54967@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54968 int i, j, k;
54969 int ret;
54970
54971- if (!capable(CAP_SYS_TTY_CONFIG))
54972- perm = 0;
54973-
54974 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54975 if (!kbs) {
54976 ret = -ENOMEM;
54977@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54978 kfree(kbs);
54979 return ((p && *p) ? -EOVERFLOW : 0);
54980 case KDSKBSENT:
54981+ if (!capable(CAP_SYS_TTY_CONFIG))
54982+ perm = 0;
54983+
54984 if (!perm) {
54985 ret = -EPERM;
54986 goto reterr;
54987diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54988index 6276f13..84f2449 100644
54989--- a/drivers/uio/uio.c
54990+++ b/drivers/uio/uio.c
54991@@ -25,6 +25,7 @@
54992 #include <linux/kobject.h>
54993 #include <linux/cdev.h>
54994 #include <linux/uio_driver.h>
54995+#include <asm/local.h>
54996
54997 #define UIO_MAX_DEVICES (1U << MINORBITS)
54998
54999@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
55000 struct device_attribute *attr, char *buf)
55001 {
55002 struct uio_device *idev = dev_get_drvdata(dev);
55003- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55004+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55005 }
55006 static DEVICE_ATTR_RO(event);
55007
55008@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
55009 {
55010 struct uio_device *idev = info->uio_dev;
55011
55012- atomic_inc(&idev->event);
55013+ atomic_inc_unchecked(&idev->event);
55014 wake_up_interruptible(&idev->wait);
55015 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55016 }
55017@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55018 }
55019
55020 listener->dev = idev;
55021- listener->event_count = atomic_read(&idev->event);
55022+ listener->event_count = atomic_read_unchecked(&idev->event);
55023 filep->private_data = listener;
55024
55025 if (idev->info->open) {
55026@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55027 return -EIO;
55028
55029 poll_wait(filep, &idev->wait, wait);
55030- if (listener->event_count != atomic_read(&idev->event))
55031+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55032 return POLLIN | POLLRDNORM;
55033 return 0;
55034 }
55035@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55036 do {
55037 set_current_state(TASK_INTERRUPTIBLE);
55038
55039- event_count = atomic_read(&idev->event);
55040+ event_count = atomic_read_unchecked(&idev->event);
55041 if (event_count != listener->event_count) {
55042 if (copy_to_user(buf, &event_count, count))
55043 retval = -EFAULT;
55044@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55045 static int uio_find_mem_index(struct vm_area_struct *vma)
55046 {
55047 struct uio_device *idev = vma->vm_private_data;
55048+ unsigned long size;
55049
55050 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55051- if (idev->info->mem[vma->vm_pgoff].size == 0)
55052+ size = idev->info->mem[vma->vm_pgoff].size;
55053+ if (size == 0)
55054+ return -1;
55055+ if (vma->vm_end - vma->vm_start > size)
55056 return -1;
55057 return (int)vma->vm_pgoff;
55058 }
55059@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
55060 idev->owner = owner;
55061 idev->info = info;
55062 init_waitqueue_head(&idev->wait);
55063- atomic_set(&idev->event, 0);
55064+ atomic_set_unchecked(&idev->event, 0);
55065
55066 ret = uio_get_minor(idev);
55067 if (ret)
55068diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55069index 813d4d3..a71934f 100644
55070--- a/drivers/usb/atm/cxacru.c
55071+++ b/drivers/usb/atm/cxacru.c
55072@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55073 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55074 if (ret < 2)
55075 return -EINVAL;
55076- if (index < 0 || index > 0x7f)
55077+ if (index > 0x7f)
55078 return -EINVAL;
55079 pos += tmp;
55080
55081diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55082index dada014..1d0d517 100644
55083--- a/drivers/usb/atm/usbatm.c
55084+++ b/drivers/usb/atm/usbatm.c
55085@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55086 if (printk_ratelimit())
55087 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55088 __func__, vpi, vci);
55089- atomic_inc(&vcc->stats->rx_err);
55090+ atomic_inc_unchecked(&vcc->stats->rx_err);
55091 return;
55092 }
55093
55094@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55095 if (length > ATM_MAX_AAL5_PDU) {
55096 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55097 __func__, length, vcc);
55098- atomic_inc(&vcc->stats->rx_err);
55099+ atomic_inc_unchecked(&vcc->stats->rx_err);
55100 goto out;
55101 }
55102
55103@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55104 if (sarb->len < pdu_length) {
55105 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55106 __func__, pdu_length, sarb->len, vcc);
55107- atomic_inc(&vcc->stats->rx_err);
55108+ atomic_inc_unchecked(&vcc->stats->rx_err);
55109 goto out;
55110 }
55111
55112 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55113 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55114 __func__, vcc);
55115- atomic_inc(&vcc->stats->rx_err);
55116+ atomic_inc_unchecked(&vcc->stats->rx_err);
55117 goto out;
55118 }
55119
55120@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55121 if (printk_ratelimit())
55122 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55123 __func__, length);
55124- atomic_inc(&vcc->stats->rx_drop);
55125+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55126 goto out;
55127 }
55128
55129@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55130
55131 vcc->push(vcc, skb);
55132
55133- atomic_inc(&vcc->stats->rx);
55134+ atomic_inc_unchecked(&vcc->stats->rx);
55135 out:
55136 skb_trim(sarb, 0);
55137 }
55138@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55139 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55140
55141 usbatm_pop(vcc, skb);
55142- atomic_inc(&vcc->stats->tx);
55143+ atomic_inc_unchecked(&vcc->stats->tx);
55144
55145 skb = skb_dequeue(&instance->sndqueue);
55146 }
55147@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55148 if (!left--)
55149 return sprintf(page,
55150 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55151- atomic_read(&atm_dev->stats.aal5.tx),
55152- atomic_read(&atm_dev->stats.aal5.tx_err),
55153- atomic_read(&atm_dev->stats.aal5.rx),
55154- atomic_read(&atm_dev->stats.aal5.rx_err),
55155- atomic_read(&atm_dev->stats.aal5.rx_drop));
55156+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55157+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55158+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55159+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55160+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55161
55162 if (!left--) {
55163 if (instance->disconnected)
55164diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55165index 2a3bbdf..91d72cf 100644
55166--- a/drivers/usb/core/devices.c
55167+++ b/drivers/usb/core/devices.c
55168@@ -126,7 +126,7 @@ static const char format_endpt[] =
55169 * time it gets called.
55170 */
55171 static struct device_connect_event {
55172- atomic_t count;
55173+ atomic_unchecked_t count;
55174 wait_queue_head_t wait;
55175 } device_event = {
55176 .count = ATOMIC_INIT(1),
55177@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55178
55179 void usbfs_conn_disc_event(void)
55180 {
55181- atomic_add(2, &device_event.count);
55182+ atomic_add_unchecked(2, &device_event.count);
55183 wake_up(&device_event.wait);
55184 }
55185
55186@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55187
55188 poll_wait(file, &device_event.wait, wait);
55189
55190- event_count = atomic_read(&device_event.count);
55191+ event_count = atomic_read_unchecked(&device_event.count);
55192 if (file->f_version != event_count) {
55193 file->f_version = event_count;
55194 return POLLIN | POLLRDNORM;
55195diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55196index 1163553..f292679 100644
55197--- a/drivers/usb/core/devio.c
55198+++ b/drivers/usb/core/devio.c
55199@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55200 struct usb_dev_state *ps = file->private_data;
55201 struct usb_device *dev = ps->dev;
55202 ssize_t ret = 0;
55203- unsigned len;
55204+ size_t len;
55205 loff_t pos;
55206 int i;
55207
55208@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55209 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55210 struct usb_config_descriptor *config =
55211 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55212- unsigned int length = le16_to_cpu(config->wTotalLength);
55213+ size_t length = le16_to_cpu(config->wTotalLength);
55214
55215 if (*ppos < pos + length) {
55216
55217 /* The descriptor may claim to be longer than it
55218 * really is. Here is the actual allocated length. */
55219- unsigned alloclen =
55220+ size_t alloclen =
55221 le16_to_cpu(dev->config[i].desc.wTotalLength);
55222
55223- len = length - (*ppos - pos);
55224+ len = length + pos - *ppos;
55225 if (len > nbytes)
55226 len = nbytes;
55227
55228 /* Simply don't write (skip over) unallocated parts */
55229 if (alloclen > (*ppos - pos)) {
55230- alloclen -= (*ppos - pos);
55231+ alloclen = alloclen + pos - *ppos;
55232 if (copy_to_user(buf,
55233 dev->rawdescriptors[i] + (*ppos - pos),
55234 min(len, alloclen))) {
55235diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55236index 45a915c..09f9735 100644
55237--- a/drivers/usb/core/hcd.c
55238+++ b/drivers/usb/core/hcd.c
55239@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55240 */
55241 usb_get_urb(urb);
55242 atomic_inc(&urb->use_count);
55243- atomic_inc(&urb->dev->urbnum);
55244+ atomic_inc_unchecked(&urb->dev->urbnum);
55245 usbmon_urb_submit(&hcd->self, urb);
55246
55247 /* NOTE requirements on root-hub callers (usbfs and the hub
55248@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55249 urb->hcpriv = NULL;
55250 INIT_LIST_HEAD(&urb->urb_list);
55251 atomic_dec(&urb->use_count);
55252- atomic_dec(&urb->dev->urbnum);
55253+ atomic_dec_unchecked(&urb->dev->urbnum);
55254 if (atomic_read(&urb->reject))
55255 wake_up(&usb_kill_urb_queue);
55256 usb_put_urb(urb);
55257diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55258index 3b71516..1f26579 100644
55259--- a/drivers/usb/core/hub.c
55260+++ b/drivers/usb/core/hub.c
55261@@ -26,6 +26,7 @@
55262 #include <linux/mutex.h>
55263 #include <linux/random.h>
55264 #include <linux/pm_qos.h>
55265+#include <linux/grsecurity.h>
55266
55267 #include <asm/uaccess.h>
55268 #include <asm/byteorder.h>
55269@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55270 goto done;
55271 return;
55272 }
55273+
55274+ if (gr_handle_new_usb())
55275+ goto done;
55276+
55277 if (hub_is_superspeed(hub->hdev))
55278 unit_load = 150;
55279 else
55280diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55281index f368d20..0c30ac5 100644
55282--- a/drivers/usb/core/message.c
55283+++ b/drivers/usb/core/message.c
55284@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55285 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55286 * error number.
55287 */
55288-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55289+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55290 __u8 requesttype, __u16 value, __u16 index, void *data,
55291 __u16 size, int timeout)
55292 {
55293@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55294 * If successful, 0. Otherwise a negative error number. The number of actual
55295 * bytes transferred will be stored in the @actual_length parameter.
55296 */
55297-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55298+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55299 void *data, int len, int *actual_length, int timeout)
55300 {
55301 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55302@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55303 * bytes transferred will be stored in the @actual_length parameter.
55304 *
55305 */
55306-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55307+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55308 void *data, int len, int *actual_length, int timeout)
55309 {
55310 struct urb *urb;
55311diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55312index d269738..7340cd7 100644
55313--- a/drivers/usb/core/sysfs.c
55314+++ b/drivers/usb/core/sysfs.c
55315@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55316 struct usb_device *udev;
55317
55318 udev = to_usb_device(dev);
55319- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55320+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55321 }
55322 static DEVICE_ATTR_RO(urbnum);
55323
55324diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55325index b1fb9ae..4224885 100644
55326--- a/drivers/usb/core/usb.c
55327+++ b/drivers/usb/core/usb.c
55328@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55329 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55330 dev->state = USB_STATE_ATTACHED;
55331 dev->lpm_disable_count = 1;
55332- atomic_set(&dev->urbnum, 0);
55333+ atomic_set_unchecked(&dev->urbnum, 0);
55334
55335 INIT_LIST_HEAD(&dev->ep0.urb_list);
55336 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55337diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55338index 8cfc319..4868255 100644
55339--- a/drivers/usb/early/ehci-dbgp.c
55340+++ b/drivers/usb/early/ehci-dbgp.c
55341@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55342
55343 #ifdef CONFIG_KGDB
55344 static struct kgdb_io kgdbdbgp_io_ops;
55345-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55346+static struct kgdb_io kgdbdbgp_io_ops_console;
55347+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55348 #else
55349 #define dbgp_kgdb_mode (0)
55350 #endif
55351@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55352 .write_char = kgdbdbgp_write_char,
55353 };
55354
55355+static struct kgdb_io kgdbdbgp_io_ops_console = {
55356+ .name = "kgdbdbgp",
55357+ .read_char = kgdbdbgp_read_char,
55358+ .write_char = kgdbdbgp_write_char,
55359+ .is_console = 1
55360+};
55361+
55362 static int kgdbdbgp_wait_time;
55363
55364 static int __init kgdbdbgp_parse_config(char *str)
55365@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55366 ptr++;
55367 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55368 }
55369- kgdb_register_io_module(&kgdbdbgp_io_ops);
55370- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55371+ if (early_dbgp_console.index != -1)
55372+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55373+ else
55374+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55375
55376 return 0;
55377 }
55378diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55379index 9719abf..789d5d9 100644
55380--- a/drivers/usb/gadget/function/f_uac1.c
55381+++ b/drivers/usb/gadget/function/f_uac1.c
55382@@ -14,6 +14,7 @@
55383 #include <linux/module.h>
55384 #include <linux/device.h>
55385 #include <linux/atomic.h>
55386+#include <linux/module.h>
55387
55388 #include "u_uac1.h"
55389
55390diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
55391index 491082a..dfd7d17 100644
55392--- a/drivers/usb/gadget/function/u_serial.c
55393+++ b/drivers/usb/gadget/function/u_serial.c
55394@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55395 spin_lock_irq(&port->port_lock);
55396
55397 /* already open? Great. */
55398- if (port->port.count) {
55399+ if (atomic_read(&port->port.count)) {
55400 status = 0;
55401- port->port.count++;
55402+ atomic_inc(&port->port.count);
55403
55404 /* currently opening/closing? wait ... */
55405 } else if (port->openclose) {
55406@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55407 tty->driver_data = port;
55408 port->port.tty = tty;
55409
55410- port->port.count = 1;
55411+ atomic_set(&port->port.count, 1);
55412 port->openclose = false;
55413
55414 /* if connected, start the I/O stream */
55415@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55416
55417 spin_lock_irq(&port->port_lock);
55418
55419- if (port->port.count != 1) {
55420- if (port->port.count == 0)
55421+ if (atomic_read(&port->port.count) != 1) {
55422+ if (atomic_read(&port->port.count) == 0)
55423 WARN_ON(1);
55424 else
55425- --port->port.count;
55426+ atomic_dec(&port->port.count);
55427 goto exit;
55428 }
55429
55430@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55431 * and sleep if necessary
55432 */
55433 port->openclose = true;
55434- port->port.count = 0;
55435+ atomic_set(&port->port.count, 0);
55436
55437 gser = port->port_usb;
55438 if (gser && gser->disconnect)
55439@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
55440 int cond;
55441
55442 spin_lock_irq(&port->port_lock);
55443- cond = (port->port.count == 0) && !port->openclose;
55444+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55445 spin_unlock_irq(&port->port_lock);
55446 return cond;
55447 }
55448@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55449 /* if it's already open, start I/O ... and notify the serial
55450 * protocol about open/close status (connect/disconnect).
55451 */
55452- if (port->port.count) {
55453+ if (atomic_read(&port->port.count)) {
55454 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55455 gs_start_io(port);
55456 if (gser->connect)
55457@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
55458
55459 port->port_usb = NULL;
55460 gser->ioport = NULL;
55461- if (port->port.count > 0 || port->openclose) {
55462+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55463 wake_up_interruptible(&port->drain_wait);
55464 if (port->port.tty)
55465 tty_hangup(port->port.tty);
55466@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
55467
55468 /* finally, free any unused/unusable I/O buffers */
55469 spin_lock_irqsave(&port->port_lock, flags);
55470- if (port->port.count == 0 && !port->openclose)
55471+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55472 gs_buf_free(&port->port_write_buf);
55473 gs_free_requests(gser->out, &port->read_pool, NULL);
55474 gs_free_requests(gser->out, &port->read_queue, NULL);
55475diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
55476index c78c841..48fd281 100644
55477--- a/drivers/usb/gadget/function/u_uac1.c
55478+++ b/drivers/usb/gadget/function/u_uac1.c
55479@@ -17,6 +17,7 @@
55480 #include <linux/ctype.h>
55481 #include <linux/random.h>
55482 #include <linux/syscalls.h>
55483+#include <linux/module.h>
55484
55485 #include "u_uac1.h"
55486
55487diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55488index 7354d01..299478e 100644
55489--- a/drivers/usb/host/ehci-hub.c
55490+++ b/drivers/usb/host/ehci-hub.c
55491@@ -772,7 +772,7 @@ static struct urb *request_single_step_set_feature_urb(
55492 urb->transfer_flags = URB_DIR_IN;
55493 usb_get_urb(urb);
55494 atomic_inc(&urb->use_count);
55495- atomic_inc(&urb->dev->urbnum);
55496+ atomic_inc_unchecked(&urb->dev->urbnum);
55497 urb->setup_dma = dma_map_single(
55498 hcd->self.controller,
55499 urb->setup_packet,
55500@@ -839,7 +839,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55501 urb->status = -EINPROGRESS;
55502 usb_get_urb(urb);
55503 atomic_inc(&urb->use_count);
55504- atomic_inc(&urb->dev->urbnum);
55505+ atomic_inc_unchecked(&urb->dev->urbnum);
55506 retval = submit_single_step_set_feature(hcd, urb, 0);
55507 if (!retval && !wait_for_completion_timeout(&done,
55508 msecs_to_jiffies(2000))) {
55509diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55510index 1db0626..4948782 100644
55511--- a/drivers/usb/host/hwa-hc.c
55512+++ b/drivers/usb/host/hwa-hc.c
55513@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55514 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55515 struct wahc *wa = &hwahc->wa;
55516 struct device *dev = &wa->usb_iface->dev;
55517- u8 mas_le[UWB_NUM_MAS/8];
55518+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55519+
55520+ if (mas_le == NULL)
55521+ return -ENOMEM;
55522
55523 /* Set the stream index */
55524 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55525@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55526 WUSB_REQ_SET_WUSB_MAS,
55527 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55528 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55529- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55530+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55531 if (result < 0)
55532 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55533 out:
55534+ kfree(mas_le);
55535+
55536 return result;
55537 }
55538
55539diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55540index b3d245e..99549ed 100644
55541--- a/drivers/usb/misc/appledisplay.c
55542+++ b/drivers/usb/misc/appledisplay.c
55543@@ -84,7 +84,7 @@ struct appledisplay {
55544 struct mutex sysfslock; /* concurrent read and write */
55545 };
55546
55547-static atomic_t count_displays = ATOMIC_INIT(0);
55548+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55549 static struct workqueue_struct *wq;
55550
55551 static void appledisplay_complete(struct urb *urb)
55552@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55553
55554 /* Register backlight device */
55555 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55556- atomic_inc_return(&count_displays) - 1);
55557+ atomic_inc_return_unchecked(&count_displays) - 1);
55558 memset(&props, 0, sizeof(struct backlight_properties));
55559 props.type = BACKLIGHT_RAW;
55560 props.max_brightness = 0xff;
55561diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55562index 3806e70..55c508b 100644
55563--- a/drivers/usb/serial/console.c
55564+++ b/drivers/usb/serial/console.c
55565@@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
55566
55567 info->port = port;
55568
55569- ++port->port.count;
55570+ atomic_inc(&port->port.count);
55571 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55572 if (serial->type->set_termios) {
55573 /*
55574@@ -175,7 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
55575 }
55576 /* Now that any required fake tty operations are completed restore
55577 * the tty port count */
55578- --port->port.count;
55579+ atomic_dec(&port->port.count);
55580 /* The console is special in terms of closing the device so
55581 * indicate this port is now acting as a system console. */
55582 port->port.console = 1;
55583@@ -188,7 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
55584 put_tty:
55585 tty_kref_put(tty);
55586 reset_open_count:
55587- port->port.count = 0;
55588+ atomic_set(&port->port.count, 0);
55589 usb_autopm_put_interface(serial->interface);
55590 error_get_interface:
55591 usb_serial_put(serial);
55592@@ -199,7 +199,7 @@ static int usb_console_setup(struct console *co, char *options)
55593 static void usb_console_write(struct console *co,
55594 const char *buf, unsigned count)
55595 {
55596- static struct usbcons_info *info = &usbcons_info;
55597+ struct usbcons_info *info = &usbcons_info;
55598 struct usb_serial_port *port = info->port;
55599 struct usb_serial *serial;
55600 int retval = -ENODEV;
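The console hunks apply a tree-wide conversion in this patch: struct tty_port's open count becomes an atomic_t, so bare ++/--/= on port->port.count turn into atomic_inc()/atomic_dec()/atomic_set(). (The last hunk above is an unrelated cleanup, dropping a needless static from a local pointer to the global usbcons_info.) A sketch of the pattern, assuming the tty_port field change made elsewhere in the patch:

    struct tty_port {
            /* ... */
            atomic_t        count;          /* was: int count; */
    };

    atomic_inc(&port->port.count);          /* was: ++port->port.count;   */
    atomic_dec(&port->port.count);          /* was: --port->port.count;   */
    atomic_set(&port->port.count, 0);       /* was: port->port.count = 0; */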
55601diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55602index 307e339..6aa97cb 100644
55603--- a/drivers/usb/storage/usb.h
55604+++ b/drivers/usb/storage/usb.h
55605@@ -63,7 +63,7 @@ struct us_unusual_dev {
55606 __u8 useProtocol;
55607 __u8 useTransport;
55608 int (*initFunction)(struct us_data *);
55609-};
55610+} __do_const;
55611
55612
55613 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55614diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
55615index a863a98..d272795 100644
55616--- a/drivers/usb/usbip/vhci.h
55617+++ b/drivers/usb/usbip/vhci.h
55618@@ -83,7 +83,7 @@ struct vhci_hcd {
55619 unsigned resuming:1;
55620 unsigned long re_timeout;
55621
55622- atomic_t seqnum;
55623+ atomic_unchecked_t seqnum;
55624
55625 /*
55626 * NOTE:
55627diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
55628index 11f6f61..1087910 100644
55629--- a/drivers/usb/usbip/vhci_hcd.c
55630+++ b/drivers/usb/usbip/vhci_hcd.c
55631@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
55632
55633 spin_lock(&vdev->priv_lock);
55634
55635- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
55636+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55637 if (priv->seqnum == 0xffff)
55638 dev_info(&urb->dev->dev, "seqnum max\n");
55639
55640@@ -685,7 +685,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
55641 return -ENOMEM;
55642 }
55643
55644- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
55645+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55646 if (unlink->seqnum == 0xffff)
55647 pr_info("seqnum max\n");
55648
55649@@ -889,7 +889,7 @@ static int vhci_start(struct usb_hcd *hcd)
55650 vdev->rhport = rhport;
55651 }
55652
55653- atomic_set(&vhci->seqnum, 0);
55654+ atomic_set_unchecked(&vhci->seqnum, 0);
55655 spin_lock_init(&vhci->lock);
55656
55657 hcd->power_budget = 0; /* no limit */
55658diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
55659index 00e4a54..d676f85 100644
55660--- a/drivers/usb/usbip/vhci_rx.c
55661+++ b/drivers/usb/usbip/vhci_rx.c
55662@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
55663 if (!urb) {
55664 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
55665 pr_info("max seqnum %d\n",
55666- atomic_read(&the_controller->seqnum));
55667+ atomic_read_unchecked(&the_controller->seqnum));
55668 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
55669 return;
55670 }
55671diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55672index edc7267..9f65ce2 100644
55673--- a/drivers/usb/wusbcore/wa-hc.h
55674+++ b/drivers/usb/wusbcore/wa-hc.h
55675@@ -240,7 +240,7 @@ struct wahc {
55676 spinlock_t xfer_list_lock;
55677 struct work_struct xfer_enqueue_work;
55678 struct work_struct xfer_error_work;
55679- atomic_t xfer_id_count;
55680+ atomic_unchecked_t xfer_id_count;
55681
55682 kernel_ulong_t quirks;
55683 };
55684@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
55685 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
55686 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
55687 wa->dto_in_use = 0;
55688- atomic_set(&wa->xfer_id_count, 1);
55689+ atomic_set_unchecked(&wa->xfer_id_count, 1);
55690 /* init the buf in URBs */
55691 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
55692 usb_init_urb(&(wa->buf_in_urbs[index]));
55693diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
55694index 69af4fd..da390d7 100644
55695--- a/drivers/usb/wusbcore/wa-xfer.c
55696+++ b/drivers/usb/wusbcore/wa-xfer.c
55697@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
55698 */
55699 static void wa_xfer_id_init(struct wa_xfer *xfer)
55700 {
55701- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
55702+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
55703 }
55704
55705 /* Return the xfer's ID. */
55706diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
55707index 837d177..170724af 100644
55708--- a/drivers/vfio/vfio.c
55709+++ b/drivers/vfio/vfio.c
55710@@ -518,7 +518,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
55711 return 0;
55712
55713 /* TODO Prevent device auto probing */
55714- WARN("Device %s added to live group %d!\n", dev_name(dev),
55715+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
55716 iommu_group_id(group->iommu_group));
55717
55718 return 0;
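The vfio change is a straight bug fix: WARN() takes the warning condition as its first argument and the format string second, so the old call passed the format string where the condition belongs. Supplying an explicit 1 makes the warning fire unconditionally, as intended. From asm-generic/bug.h, abridged:

    #define WARN(condition, format...) ({                   \
            int __ret_warn_on = !!(condition);              \
            if (unlikely(__ret_warn_on))                    \
                    __WARN_printf(format);                  \
            unlikely(__ret_warn_on);                        \
    })

    WARN(1, "Device %s added to live group %d!\n", ...);    /* always warns */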
55719diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
55720index 3bb02c6..a01ff38 100644
55721--- a/drivers/vhost/vringh.c
55722+++ b/drivers/vhost/vringh.c
55723@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
55724 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
55725 {
55726 __virtio16 v = 0;
55727- int rc = get_user(v, (__force __virtio16 __user *)p);
55728+ int rc = get_user(v, (__force_user __virtio16 *)p);
55729 *val = vringh16_to_cpu(vrh, v);
55730 return rc;
55731 }
55732@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
55733 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
55734 {
55735 __virtio16 v = cpu_to_vringh16(vrh, val);
55736- return put_user(v, (__force __virtio16 __user *)p);
55737+ return put_user(v, (__force_user __virtio16 *)p);
55738 }
55739
55740 static inline int copydesc_user(void *dst, const void *src, size_t len)
55741 {
55742- return copy_from_user(dst, (__force void __user *)src, len) ?
55743+ return copy_from_user(dst, (void __force_user *)src, len) ?
55744 -EFAULT : 0;
55745 }
55746
55747@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
55748 const struct vring_used_elem *src,
55749 unsigned int num)
55750 {
55751- return copy_to_user((__force void __user *)dst, src,
55752+ return copy_to_user((void __force_user *)dst, src,
55753 sizeof(*dst) * num) ? -EFAULT : 0;
55754 }
55755
55756 static inline int xfer_from_user(void *src, void *dst, size_t len)
55757 {
55758- return copy_from_user(dst, (__force void __user *)src, len) ?
55759+ return copy_from_user(dst, (void __force_user *)src, len) ?
55760 -EFAULT : 0;
55761 }
55762
55763 static inline int xfer_to_user(void *dst, void *src, size_t len)
55764 {
55765- return copy_to_user((__force void __user *)dst, src, len) ?
55766+ return copy_to_user((void __force_user *)dst, src, len) ?
55767 -EFAULT : 0;
55768 }
55769
55770@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
55771 vrh->last_used_idx = 0;
55772 vrh->vring.num = num;
55773 /* vring expects kernel addresses, but only used via accessors. */
55774- vrh->vring.desc = (__force struct vring_desc *)desc;
55775- vrh->vring.avail = (__force struct vring_avail *)avail;
55776- vrh->vring.used = (__force struct vring_used *)used;
55777+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
55778+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
55779+ vrh->vring.used = (__force_kernel struct vring_used *)used;
55780 return 0;
55781 }
55782 EXPORT_SYMBOL(vringh_init_user);
55783@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
55784
55785 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
55786 {
55787- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
55788+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
55789 return 0;
55790 }
55791
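vringh's user accessors deliberately cast kernel-side pointers into (or out of) the __user address space before calling the uaccess helpers. Under PaX's UDEREF/USERCOPY checking, a bare __force __user cast is replaced by __force_user (and __force_kernel for the reverse direction in vringh_init_user()), so that sparse and the GCC checker plugin agree on which address space a pointer really occupies. Roughly, ignoring the plugin-specific cases the patch also handles in include/linux/compiler.h:

    #ifdef __CHECKER__
    # define __force_user   __force __user
    # define __force_kernel __force __kernel
    #else
    # define __force_user
    # define __force_kernel
    #endif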
55792diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
55793index 84a110a..96312c3 100644
55794--- a/drivers/video/backlight/kb3886_bl.c
55795+++ b/drivers/video/backlight/kb3886_bl.c
55796@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
55797 static unsigned long kb3886bl_flags;
55798 #define KB3886BL_SUSPENDED 0x01
55799
55800-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
55801+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
55802 {
55803 .ident = "Sahara Touch-iT",
55804 .matches = {
55805diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
55806index 1b0b233..6f34c2c 100644
55807--- a/drivers/video/fbdev/arcfb.c
55808+++ b/drivers/video/fbdev/arcfb.c
55809@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
55810 return -ENOSPC;
55811
55812 err = 0;
55813- if ((count + p) > fbmemlength) {
55814+ if (count > (fbmemlength - p)) {
55815 count = fbmemlength - p;
55816 err = -ENOSPC;
55817 }
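The arcfb hunk hardens a classic bounds check against integer overflow: both count and p are unsigned, so count + p can wrap and compare as small even when count is enormous, while count > fbmemlength - p cannot misfire because p has already been checked against fbmemlength just above the hunk. A self-contained demonstration with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long fbmemlength = 4096;
            unsigned long p = 100;                    /* already known: p < fbmemlength */
            unsigned long count = (unsigned long)-64; /* attacker-sized write */

            /* old check: count + p wraps to 36, so nothing is rejected */
            printf("old check rejects: %d\n", (count + p) > fbmemlength);
            /* new check: cannot wrap, rejects as it should */
            printf("new check rejects: %d\n", count > (fbmemlength - p));
            return 0;
    }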
55818diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
55819index aedf2fb..47c9aca 100644
55820--- a/drivers/video/fbdev/aty/aty128fb.c
55821+++ b/drivers/video/fbdev/aty/aty128fb.c
55822@@ -149,7 +149,7 @@ enum {
55823 };
55824
55825 /* Must match above enum */
55826-static char * const r128_family[] = {
55827+static const char * const r128_family[] = {
55828 "AGP",
55829 "PCI",
55830 "PRO AGP",
55831diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
55832index 8789e48..698fe4c 100644
55833--- a/drivers/video/fbdev/aty/atyfb_base.c
55834+++ b/drivers/video/fbdev/aty/atyfb_base.c
55835@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
55836 par->accel_flags = var->accel_flags; /* hack */
55837
55838 if (var->accel_flags) {
55839- info->fbops->fb_sync = atyfb_sync;
55840+ pax_open_kernel();
55841+ *(void **)&info->fbops->fb_sync = atyfb_sync;
55842+ pax_close_kernel();
55843 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55844 } else {
55845- info->fbops->fb_sync = NULL;
55846+ pax_open_kernel();
55847+ *(void **)&info->fbops->fb_sync = NULL;
55848+ pax_close_kernel();
55849 info->flags |= FBINFO_HWACCEL_DISABLED;
55850 }
55851
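The fbops edits here and in the drivers that follow (mach64_cursor, fb_defio, mb862xx, nvidia, omap2 dss, s1d13xxx, smscufx, udlfb, uvesafb, vesafb) all share one shape: with PaX KERNEXEC/constify active, ops structures full of function pointers are kept in read-only memory, so the few legitimate runtime writes are bracketed by pax_open_kernel()/pax_close_kernel() and performed through a *(void **)& cast to get past the const qualifier. On x86 the bracket conceptually toggles CR0.WP so the kernel may briefly write through read-only mappings; a simplified sketch of that idea, not the patch's exact per-architecture implementation:

    static inline void pax_open_kernel(void)
    {
            preempt_disable();
            barrier();
            write_cr0(read_cr0() & ~X86_CR0_WP);    /* allow writes to r/o pages */
    }

    static inline void pax_close_kernel(void)
    {
            write_cr0(read_cr0() | X86_CR0_WP);     /* restore write protection */
            barrier();
            preempt_enable();
    }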
55852diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
55853index 2fa0317..4983f2a 100644
55854--- a/drivers/video/fbdev/aty/mach64_cursor.c
55855+++ b/drivers/video/fbdev/aty/mach64_cursor.c
55856@@ -8,6 +8,7 @@
55857 #include "../core/fb_draw.h"
55858
55859 #include <asm/io.h>
55860+#include <asm/pgtable.h>
55861
55862 #ifdef __sparc__
55863 #include <asm/fbio.h>
55864@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
55865 info->sprite.buf_align = 16; /* and 64 lines tall. */
55866 info->sprite.flags = FB_PIXMAP_IO;
55867
55868- info->fbops->fb_cursor = atyfb_cursor;
55869+ pax_open_kernel();
55870+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
55871+ pax_close_kernel();
55872
55873 return 0;
55874 }
55875diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
55876index d6cab1f..112f680 100644
55877--- a/drivers/video/fbdev/core/fb_defio.c
55878+++ b/drivers/video/fbdev/core/fb_defio.c
55879@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
55880
55881 BUG_ON(!fbdefio);
55882 mutex_init(&fbdefio->lock);
55883- info->fbops->fb_mmap = fb_deferred_io_mmap;
55884+ pax_open_kernel();
55885+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55886+ pax_close_kernel();
55887 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55888 INIT_LIST_HEAD(&fbdefio->pagelist);
55889 if (fbdefio->delay == 0) /* set a default of 1 s */
55890@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55891 page->mapping = NULL;
55892 }
55893
55894- info->fbops->fb_mmap = NULL;
55895+ *(void **)&info->fbops->fb_mmap = NULL;
55896 mutex_destroy(&fbdefio->lock);
55897 }
55898 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55899diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55900index 0705d88..d9429bf 100644
55901--- a/drivers/video/fbdev/core/fbmem.c
55902+++ b/drivers/video/fbdev/core/fbmem.c
55903@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55904 __u32 data;
55905 int err;
55906
55907- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55908+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55909
55910 data = (__u32) (unsigned long) fix->smem_start;
55911 err |= put_user(data, &fix32->smem_start);
55912diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55913index 4254336..282567e 100644
55914--- a/drivers/video/fbdev/hyperv_fb.c
55915+++ b/drivers/video/fbdev/hyperv_fb.c
55916@@ -240,7 +240,7 @@ static uint screen_fb_size;
55917 static inline int synthvid_send(struct hv_device *hdev,
55918 struct synthvid_msg *msg)
55919 {
55920- static atomic64_t request_id = ATOMIC64_INIT(0);
55921+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55922 int ret;
55923
55924 msg->pipe_hdr.type = PIPE_MSG_DATA;
55925@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55926
55927 ret = vmbus_sendpacket(hdev->channel, msg,
55928 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55929- atomic64_inc_return(&request_id),
55930+ atomic64_inc_return_unchecked(&request_id),
55931 VM_PKT_DATA_INBAND, 0);
55932
55933 if (ret)
55934diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55935index 7672d2e..b56437f 100644
55936--- a/drivers/video/fbdev/i810/i810_accel.c
55937+++ b/drivers/video/fbdev/i810/i810_accel.c
55938@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55939 }
55940 }
55941 printk("ringbuffer lockup!!!\n");
55942+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55943 i810_report_error(mmio);
55944 par->dev_flags |= LOCKUP;
55945 info->pixmap.scan_align = 1;
55946diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55947index a01147f..5d896f8 100644
55948--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55949+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55950@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55951
55952 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55953 struct matrox_switch matrox_mystique = {
55954- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55955+ .preinit = MGA1064_preinit,
55956+ .reset = MGA1064_reset,
55957+ .init = MGA1064_init,
55958+ .restore = MGA1064_restore,
55959 };
55960 EXPORT_SYMBOL(matrox_mystique);
55961 #endif
55962
55963 #ifdef CONFIG_FB_MATROX_G
55964 struct matrox_switch matrox_G100 = {
55965- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55966+ .preinit = MGAG100_preinit,
55967+ .reset = MGAG100_reset,
55968+ .init = MGAG100_init,
55969+ .restore = MGAG100_restore,
55970 };
55971 EXPORT_SYMBOL(matrox_G100);
55972 #endif
55973diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55974index 195ad7c..09743fc 100644
55975--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55976+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55977@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55978 }
55979
55980 struct matrox_switch matrox_millennium = {
55981- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55982+ .preinit = Ti3026_preinit,
55983+ .reset = Ti3026_reset,
55984+ .init = Ti3026_init,
55985+ .restore = Ti3026_restore
55986 };
55987 EXPORT_SYMBOL(matrox_millennium);
55988 #endif
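The matroxfb hunks convert positional struct initializers to designated ones, the same change made to sh_mobile_lcdc_sys_bus_ops a little further down. Nothing about the generated code changes; it is groundwork for grsecurity's GCC plugins, which expect ops structures to name their fields so a constified or reordered layout cannot silently mis-wire the callbacks:

    /* before: order-dependent, breaks silently if fields move */
    struct matrox_switch matrox_millennium = {
            Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
    };

    /* after: each handler is tied to its slot by name */
    struct matrox_switch matrox_millennium = {
            .preinit = Ti3026_preinit,
            .reset   = Ti3026_reset,
            .init    = Ti3026_init,
            .restore = Ti3026_restore,
    };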
55989diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55990index fe92eed..106e085 100644
55991--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55992+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55993@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55994 struct mb862xxfb_par *par = info->par;
55995
55996 if (info->var.bits_per_pixel == 32) {
55997- info->fbops->fb_fillrect = cfb_fillrect;
55998- info->fbops->fb_copyarea = cfb_copyarea;
55999- info->fbops->fb_imageblit = cfb_imageblit;
56000+ pax_open_kernel();
56001+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56002+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56003+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56004+ pax_close_kernel();
56005 } else {
56006 outreg(disp, GC_L0EM, 3);
56007- info->fbops->fb_fillrect = mb86290fb_fillrect;
56008- info->fbops->fb_copyarea = mb86290fb_copyarea;
56009- info->fbops->fb_imageblit = mb86290fb_imageblit;
56010+ pax_open_kernel();
56011+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56012+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56013+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56014+ pax_close_kernel();
56015 }
56016 outreg(draw, GDC_REG_DRAW_BASE, 0);
56017 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56018diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56019index def0412..fed6529 100644
56020--- a/drivers/video/fbdev/nvidia/nvidia.c
56021+++ b/drivers/video/fbdev/nvidia/nvidia.c
56022@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56023 info->fix.line_length = (info->var.xres_virtual *
56024 info->var.bits_per_pixel) >> 3;
56025 if (info->var.accel_flags) {
56026- info->fbops->fb_imageblit = nvidiafb_imageblit;
56027- info->fbops->fb_fillrect = nvidiafb_fillrect;
56028- info->fbops->fb_copyarea = nvidiafb_copyarea;
56029- info->fbops->fb_sync = nvidiafb_sync;
56030+ pax_open_kernel();
56031+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56032+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56033+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56034+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56035+ pax_close_kernel();
56036 info->pixmap.scan_align = 4;
56037 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56038 info->flags |= FBINFO_READS_FAST;
56039 NVResetGraphics(info);
56040 } else {
56041- info->fbops->fb_imageblit = cfb_imageblit;
56042- info->fbops->fb_fillrect = cfb_fillrect;
56043- info->fbops->fb_copyarea = cfb_copyarea;
56044- info->fbops->fb_sync = NULL;
56045+ pax_open_kernel();
56046+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56047+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56048+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56049+ *(void **)&info->fbops->fb_sync = NULL;
56050+ pax_close_kernel();
56051 info->pixmap.scan_align = 1;
56052 info->flags |= FBINFO_HWACCEL_DISABLED;
56053 info->flags &= ~FBINFO_READS_FAST;
56054@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56055 info->pixmap.size = 8 * 1024;
56056 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56057
56058- if (!hwcur)
56059- info->fbops->fb_cursor = NULL;
56060+ if (!hwcur) {
56061+ pax_open_kernel();
56062+ *(void **)&info->fbops->fb_cursor = NULL;
56063+ pax_close_kernel();
56064+ }
56065
56066 info->var.accel_flags = (!noaccel);
56067
56068diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56069index 2412a0d..294215b 100644
56070--- a/drivers/video/fbdev/omap2/dss/display.c
56071+++ b/drivers/video/fbdev/omap2/dss/display.c
56072@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56073 if (dssdev->name == NULL)
56074 dssdev->name = dssdev->alias;
56075
56076+ pax_open_kernel();
56077 if (drv && drv->get_resolution == NULL)
56078- drv->get_resolution = omapdss_default_get_resolution;
56079+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56080 if (drv && drv->get_recommended_bpp == NULL)
56081- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56082+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56083 if (drv && drv->get_timings == NULL)
56084- drv->get_timings = omapdss_default_get_timings;
56085+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56086+ pax_close_kernel();
56087
56088 mutex_lock(&panel_list_mutex);
56089 list_add_tail(&dssdev->panel_list, &panel_list);
56090diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56091index 83433cb..71e9b98 100644
56092--- a/drivers/video/fbdev/s1d13xxxfb.c
56093+++ b/drivers/video/fbdev/s1d13xxxfb.c
56094@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56095
56096 switch(prod_id) {
56097 case S1D13506_PROD_ID: /* activate acceleration */
56098- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56099- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56100+ pax_open_kernel();
56101+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56102+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56103+ pax_close_kernel();
56104 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56105 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56106 break;
56107diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56108index d3013cd..95b8285 100644
56109--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56110+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56111@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56112 }
56113
56114 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56115- lcdc_sys_write_index,
56116- lcdc_sys_write_data,
56117- lcdc_sys_read_data,
56118+ .write_index = lcdc_sys_write_index,
56119+ .write_data = lcdc_sys_write_data,
56120+ .read_data = lcdc_sys_read_data,
56121 };
56122
56123 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56124diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56125index 9279e5f..d5f5276 100644
56126--- a/drivers/video/fbdev/smscufx.c
56127+++ b/drivers/video/fbdev/smscufx.c
56128@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56129 fb_deferred_io_cleanup(info);
56130 kfree(info->fbdefio);
56131 info->fbdefio = NULL;
56132- info->fbops->fb_mmap = ufx_ops_mmap;
56133+ pax_open_kernel();
56134+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56135+ pax_close_kernel();
56136 }
56137
56138 pr_debug("released /dev/fb%d user=%d count=%d",
56139diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56140index ff2b873..626a8d5 100644
56141--- a/drivers/video/fbdev/udlfb.c
56142+++ b/drivers/video/fbdev/udlfb.c
56143@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56144 dlfb_urb_completion(urb);
56145
56146 error:
56147- atomic_add(bytes_sent, &dev->bytes_sent);
56148- atomic_add(bytes_identical, &dev->bytes_identical);
56149- atomic_add(width*height*2, &dev->bytes_rendered);
56150+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56151+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56152+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56153 end_cycles = get_cycles();
56154- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56155+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56156 >> 10)), /* Kcycles */
56157 &dev->cpu_kcycles_used);
56158
56159@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56160 dlfb_urb_completion(urb);
56161
56162 error:
56163- atomic_add(bytes_sent, &dev->bytes_sent);
56164- atomic_add(bytes_identical, &dev->bytes_identical);
56165- atomic_add(bytes_rendered, &dev->bytes_rendered);
56166+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56167+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56168+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56169 end_cycles = get_cycles();
56170- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56171+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56172 >> 10)), /* Kcycles */
56173 &dev->cpu_kcycles_used);
56174 }
56175@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56176 fb_deferred_io_cleanup(info);
56177 kfree(info->fbdefio);
56178 info->fbdefio = NULL;
56179- info->fbops->fb_mmap = dlfb_ops_mmap;
56180+ pax_open_kernel();
56181+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56182+ pax_close_kernel();
56183 }
56184
56185 pr_warn("released /dev/fb%d user=%d count=%d\n",
56186@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56187 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56188 struct dlfb_data *dev = fb_info->par;
56189 return snprintf(buf, PAGE_SIZE, "%u\n",
56190- atomic_read(&dev->bytes_rendered));
56191+ atomic_read_unchecked(&dev->bytes_rendered));
56192 }
56193
56194 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56195@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56196 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56197 struct dlfb_data *dev = fb_info->par;
56198 return snprintf(buf, PAGE_SIZE, "%u\n",
56199- atomic_read(&dev->bytes_identical));
56200+ atomic_read_unchecked(&dev->bytes_identical));
56201 }
56202
56203 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56204@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56205 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56206 struct dlfb_data *dev = fb_info->par;
56207 return snprintf(buf, PAGE_SIZE, "%u\n",
56208- atomic_read(&dev->bytes_sent));
56209+ atomic_read_unchecked(&dev->bytes_sent));
56210 }
56211
56212 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56213@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56214 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56215 struct dlfb_data *dev = fb_info->par;
56216 return snprintf(buf, PAGE_SIZE, "%u\n",
56217- atomic_read(&dev->cpu_kcycles_used));
56218+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56219 }
56220
56221 static ssize_t edid_show(
56222@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56223 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56224 struct dlfb_data *dev = fb_info->par;
56225
56226- atomic_set(&dev->bytes_rendered, 0);
56227- atomic_set(&dev->bytes_identical, 0);
56228- atomic_set(&dev->bytes_sent, 0);
56229- atomic_set(&dev->cpu_kcycles_used, 0);
56230+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56231+ atomic_set_unchecked(&dev->bytes_identical, 0);
56232+ atomic_set_unchecked(&dev->bytes_sent, 0);
56233+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56234
56235 return count;
56236 }
56237diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56238index d32d1c4..46722e6 100644
56239--- a/drivers/video/fbdev/uvesafb.c
56240+++ b/drivers/video/fbdev/uvesafb.c
56241@@ -19,6 +19,7 @@
56242 #include <linux/io.h>
56243 #include <linux/mutex.h>
56244 #include <linux/slab.h>
56245+#include <linux/moduleloader.h>
56246 #include <video/edid.h>
56247 #include <video/uvesafb.h>
56248 #ifdef CONFIG_X86
56249@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56250 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56251 par->pmi_setpal = par->ypan = 0;
56252 } else {
56253+
56254+#ifdef CONFIG_PAX_KERNEXEC
56255+#ifdef CONFIG_MODULES
56256+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56257+#endif
56258+ if (!par->pmi_code) {
56259+ par->pmi_setpal = par->ypan = 0;
56260+ return 0;
56261+ }
56262+#endif
56263+
56264 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56265 + task->t.regs.edi);
56266+
56267+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56268+ pax_open_kernel();
56269+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56270+ pax_close_kernel();
56271+
56272+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56273+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56274+#else
56275 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56276 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56277+#endif
56278+
56279 printk(KERN_INFO "uvesafb: protected mode interface info at "
56280 "%04x:%04x\n",
56281 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56282@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56283 par->ypan = ypan;
56284
56285 if (par->pmi_setpal || par->ypan) {
56286+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56287 if (__supported_pte_mask & _PAGE_NX) {
56288 par->pmi_setpal = par->ypan = 0;
56289 printk(KERN_WARNING "uvesafb: NX protection is active, "
56290 "better not use the PMI.\n");
56291- } else {
56292+ } else
56293+#endif
56294 uvesafb_vbe_getpmi(task, par);
56295- }
56296 }
56297 #else
56298 /* The protected mode interface is not available on non-x86. */
56299@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56300 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56301
56302 /* Disable blanking if the user requested so. */
56303- if (!blank)
56304- info->fbops->fb_blank = NULL;
56305+ if (!blank) {
56306+ pax_open_kernel();
56307+ *(void **)&info->fbops->fb_blank = NULL;
56308+ pax_close_kernel();
56309+ }
56310
56311 /*
56312 * Find out how much IO memory is required for the mode with
56313@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56314 info->flags = FBINFO_FLAG_DEFAULT |
56315 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56316
56317- if (!par->ypan)
56318- info->fbops->fb_pan_display = NULL;
56319+ if (!par->ypan) {
56320+ pax_open_kernel();
56321+ *(void **)&info->fbops->fb_pan_display = NULL;
56322+ pax_close_kernel();
56323+ }
56324 }
56325
56326 static void uvesafb_init_mtrr(struct fb_info *info)
56327@@ -1786,6 +1816,11 @@ out_mode:
56328 out:
56329 kfree(par->vbe_modes);
56330
56331+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56332+ if (par->pmi_code)
56333+ module_memfree_exec(par->pmi_code);
56334+#endif
56335+
56336 framebuffer_release(info);
56337 return err;
56338 }
56339@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
56340 kfree(par->vbe_state_orig);
56341 kfree(par->vbe_state_saved);
56342
56343+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56344+ if (par->pmi_code)
56345+ module_memfree_exec(par->pmi_code);
56346+#endif
56347+
56348 framebuffer_release(info);
56349 }
56350 return 0;
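The uvesafb rework (and the matching vesafb changes next) deals with the x86 VESA protected-mode interface: the BIOS-provided PMI code is normally executed straight out of a phys_to_virt() mapping, which KERNEXEC forbids because that memory is not part of the kernel's executable image. Under CONFIG_PAX_KERNEXEC the patch instead copies the PMI code into executable memory from module_alloc_exec(), points the entry offsets into the copy, and releases it via module_memfree_exec() on every teardown path. A condensed sketch of the flow, where pmi_size stands in for the length the real code takes from task->t.regs.ecx and ktva_ktla() is PaX's translation between the kernel's executable and writable views of that memory:

    #if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
            par->pmi_code = module_alloc_exec(pmi_size);    /* executable region */
            if (!par->pmi_code)
                    goto no_pmi;                            /* fall back: no PMI */

            pax_open_kernel();
            memcpy(par->pmi_code, par->pmi_base, pmi_size); /* relocate BIOS code */
            pax_close_kernel();

            par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
            par->pmi_pal   = ktva_ktla(par->pmi_code + par->pmi_base[2]);
    #endif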
56351diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56352index d79a0ac..2d0c3d4 100644
56353--- a/drivers/video/fbdev/vesafb.c
56354+++ b/drivers/video/fbdev/vesafb.c
56355@@ -9,6 +9,7 @@
56356 */
56357
56358 #include <linux/module.h>
56359+#include <linux/moduleloader.h>
56360 #include <linux/kernel.h>
56361 #include <linux/errno.h>
56362 #include <linux/string.h>
56363@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56364 static int vram_total; /* Set total amount of memory */
56365 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56366 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56367-static void (*pmi_start)(void) __read_mostly;
56368-static void (*pmi_pal) (void) __read_mostly;
56369+static void (*pmi_start)(void) __read_only;
56370+static void (*pmi_pal) (void) __read_only;
56371 static int depth __read_mostly;
56372 static int vga_compat __read_mostly;
56373 /* --------------------------------------------------------------------- */
56374@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56375 unsigned int size_remap;
56376 unsigned int size_total;
56377 char *option = NULL;
56378+ void *pmi_code = NULL;
56379
56380 /* ignore error return of fb_get_options */
56381 fb_get_options("vesafb", &option);
56382@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56383 size_remap = size_total;
56384 vesafb_fix.smem_len = size_remap;
56385
56386-#ifndef __i386__
56387- screen_info.vesapm_seg = 0;
56388-#endif
56389-
56390 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56391 printk(KERN_WARNING
56392 "vesafb: cannot reserve video memory at 0x%lx\n",
56393@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56394 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56395 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56396
56397+#ifdef __i386__
56398+
56399+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56400+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56401+ if (!pmi_code)
56402+#elif !defined(CONFIG_PAX_KERNEXEC)
56403+ if (0)
56404+#endif
56405+
56406+#endif
56407+ screen_info.vesapm_seg = 0;
56408+
56409 if (screen_info.vesapm_seg) {
56410- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56411- screen_info.vesapm_seg,screen_info.vesapm_off);
56412+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56413+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56414 }
56415
56416 if (screen_info.vesapm_seg < 0xc000)
56417@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56418
56419 if (ypan || pmi_setpal) {
56420 unsigned short *pmi_base;
56421+
56422 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56423- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56424- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56425+
56426+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56427+ pax_open_kernel();
56428+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56429+#else
56430+ pmi_code = pmi_base;
56431+#endif
56432+
56433+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56434+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56435+
56436+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56437+ pmi_start = ktva_ktla(pmi_start);
56438+ pmi_pal = ktva_ktla(pmi_pal);
56439+ pax_close_kernel();
56440+#endif
56441+
56442 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56443 if (pmi_base[3]) {
56444 printk(KERN_INFO "vesafb: pmi: ports = ");
56445@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56446 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56447 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56448
56449- if (!ypan)
56450- info->fbops->fb_pan_display = NULL;
56451+ if (!ypan) {
56452+ pax_open_kernel();
56453+ *(void **)&info->fbops->fb_pan_display = NULL;
56454+ pax_close_kernel();
56455+ }
56456
56457 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56458 err = -ENOMEM;
56459@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56460 fb_info(info, "%s frame buffer device\n", info->fix.id);
56461 return 0;
56462 err:
56463+
56464+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56465+ module_memfree_exec(pmi_code);
56466+#endif
56467+
56468 if (info->screen_base)
56469 iounmap(info->screen_base);
56470 framebuffer_release(info);
56471diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56472index 88714ae..16c2e11 100644
56473--- a/drivers/video/fbdev/via/via_clock.h
56474+++ b/drivers/video/fbdev/via/via_clock.h
56475@@ -56,7 +56,7 @@ struct via_clock {
56476
56477 void (*set_engine_pll_state)(u8 state);
56478 void (*set_engine_pll)(struct via_pll_config config);
56479-};
56480+} __no_const;
56481
56482
56483 static inline u32 get_pll_internal_frequency(u32 ref_freq,
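__no_const is the escape hatch that pairs with the __do_const seen on us_unusual_dev earlier: the constify plugin moves function-pointer structures into read-only memory by default, __do_const forces that even for types it would otherwise skip, and __no_const exempts tables such as via_clock whose slots the driver legitimately fills in at runtime. When the plugin is not compiled in, both annotations vanish; roughly:

    #ifdef CONSTIFY_PLUGIN
    # define __no_const __attribute__((no_const))
    # define __do_const __attribute__((do_const))
    #else
    # define __no_const
    # define __do_const
    #endif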
56484diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56485index 3c14e43..2630570 100644
56486--- a/drivers/video/logo/logo_linux_clut224.ppm
56487+++ b/drivers/video/logo/logo_linux_clut224.ppm
56488@@ -2,1603 +2,1123 @@ P3
56489 # Standard 224-color Linux logo
56490 80 80
56491 255
[pixel data elided -- this hunk swaps out the stock 80x80, 224-color Linux boot logo, deleting its 1603 rows of RGB triplets and adding 1123 rows for the replacement image; the raw numeric image data carries no information beyond the picture itself]
57166- 2 2 6 18 18 18 90 90 90 62 62 62
57167- 30 30 30 10 10 10 0 0 0 0 0 0
57168- 0 0 0 0 0 0 0 0 0 0 0 0
57169- 0 0 0 0 0 0 0 0 0 0 0 0
57170- 0 0 0 0 0 0 0 0 0 0 0 0
57171- 0 0 0 0 0 0 0 0 0 0 0 0
57172- 0 0 0 0 0 0 0 0 0 0 0 0
57173- 0 0 0 0 0 0 0 0 0 0 0 0
57174- 0 0 0 0 0 0 0 0 0 0 0 0
57175- 0 0 0 0 0 0 0 0 0 0 0 0
57176- 0 0 0 0 0 0 0 0 0 0 0 0
57177- 0 0 0 0 0 0 10 10 10 26 26 26
57178- 58 58 58 90 90 90 18 18 18 2 2 6
57179- 2 2 6 110 110 110 253 253 253 253 253 253
57180-253 253 253 253 253 253 253 253 253 253 253 253
57181-250 250 250 253 253 253 253 253 253 253 253 253
57182-253 253 253 253 253 253 253 253 253 253 253 253
57183-253 253 253 253 253 253 253 253 253 253 253 253
57184-253 253 253 231 231 231 18 18 18 2 2 6
57185- 2 2 6 2 2 6 2 2 6 2 2 6
57186- 2 2 6 2 2 6 18 18 18 94 94 94
57187- 54 54 54 26 26 26 10 10 10 0 0 0
57188- 0 0 0 0 0 0 0 0 0 0 0 0
57189- 0 0 0 0 0 0 0 0 0 0 0 0
57190- 0 0 0 0 0 0 0 0 0 0 0 0
57191- 0 0 0 0 0 0 0 0 0 0 0 0
57192- 0 0 0 0 0 0 0 0 0 0 0 0
57193- 0 0 0 0 0 0 0 0 0 0 0 0
57194- 0 0 0 0 0 0 0 0 0 0 0 0
57195- 0 0 0 0 0 0 0 0 0 0 0 0
57196- 0 0 0 0 0 0 0 0 0 0 0 0
57197- 0 0 0 6 6 6 22 22 22 50 50 50
57198- 90 90 90 26 26 26 2 2 6 2 2 6
57199- 14 14 14 195 195 195 250 250 250 253 253 253
57200-253 253 253 253 253 253 253 253 253 253 253 253
57201-253 253 253 253 253 253 253 253 253 253 253 253
57202-253 253 253 253 253 253 253 253 253 253 253 253
57203-253 253 253 253 253 253 253 253 253 253 253 253
57204-250 250 250 242 242 242 54 54 54 2 2 6
57205- 2 2 6 2 2 6 2 2 6 2 2 6
57206- 2 2 6 2 2 6 2 2 6 38 38 38
57207- 86 86 86 50 50 50 22 22 22 6 6 6
57208- 0 0 0 0 0 0 0 0 0 0 0 0
57209- 0 0 0 0 0 0 0 0 0 0 0 0
57210- 0 0 0 0 0 0 0 0 0 0 0 0
57211- 0 0 0 0 0 0 0 0 0 0 0 0
57212- 0 0 0 0 0 0 0 0 0 0 0 0
57213- 0 0 0 0 0 0 0 0 0 0 0 0
57214- 0 0 0 0 0 0 0 0 0 0 0 0
57215- 0 0 0 0 0 0 0 0 0 0 0 0
57216- 0 0 0 0 0 0 0 0 0 0 0 0
57217- 6 6 6 14 14 14 38 38 38 82 82 82
57218- 34 34 34 2 2 6 2 2 6 2 2 6
57219- 42 42 42 195 195 195 246 246 246 253 253 253
57220-253 253 253 253 253 253 253 253 253 250 250 250
57221-242 242 242 242 242 242 250 250 250 253 253 253
57222-253 253 253 253 253 253 253 253 253 253 253 253
57223-253 253 253 250 250 250 246 246 246 238 238 238
57224-226 226 226 231 231 231 101 101 101 6 6 6
57225- 2 2 6 2 2 6 2 2 6 2 2 6
57226- 2 2 6 2 2 6 2 2 6 2 2 6
57227- 38 38 38 82 82 82 42 42 42 14 14 14
57228- 6 6 6 0 0 0 0 0 0 0 0 0
57229- 0 0 0 0 0 0 0 0 0 0 0 0
57230- 0 0 0 0 0 0 0 0 0 0 0 0
57231- 0 0 0 0 0 0 0 0 0 0 0 0
57232- 0 0 0 0 0 0 0 0 0 0 0 0
57233- 0 0 0 0 0 0 0 0 0 0 0 0
57234- 0 0 0 0 0 0 0 0 0 0 0 0
57235- 0 0 0 0 0 0 0 0 0 0 0 0
57236- 0 0 0 0 0 0 0 0 0 0 0 0
57237- 10 10 10 26 26 26 62 62 62 66 66 66
57238- 2 2 6 2 2 6 2 2 6 6 6 6
57239- 70 70 70 170 170 170 206 206 206 234 234 234
57240-246 246 246 250 250 250 250 250 250 238 238 238
57241-226 226 226 231 231 231 238 238 238 250 250 250
57242-250 250 250 250 250 250 246 246 246 231 231 231
57243-214 214 214 206 206 206 202 202 202 202 202 202
57244-198 198 198 202 202 202 182 182 182 18 18 18
57245- 2 2 6 2 2 6 2 2 6 2 2 6
57246- 2 2 6 2 2 6 2 2 6 2 2 6
57247- 2 2 6 62 62 62 66 66 66 30 30 30
57248- 10 10 10 0 0 0 0 0 0 0 0 0
57249- 0 0 0 0 0 0 0 0 0 0 0 0
57250- 0 0 0 0 0 0 0 0 0 0 0 0
57251- 0 0 0 0 0 0 0 0 0 0 0 0
57252- 0 0 0 0 0 0 0 0 0 0 0 0
57253- 0 0 0 0 0 0 0 0 0 0 0 0
57254- 0 0 0 0 0 0 0 0 0 0 0 0
57255- 0 0 0 0 0 0 0 0 0 0 0 0
57256- 0 0 0 0 0 0 0 0 0 0 0 0
57257- 14 14 14 42 42 42 82 82 82 18 18 18
57258- 2 2 6 2 2 6 2 2 6 10 10 10
57259- 94 94 94 182 182 182 218 218 218 242 242 242
57260-250 250 250 253 253 253 253 253 253 250 250 250
57261-234 234 234 253 253 253 253 253 253 253 253 253
57262-253 253 253 253 253 253 253 253 253 246 246 246
57263-238 238 238 226 226 226 210 210 210 202 202 202
57264-195 195 195 195 195 195 210 210 210 158 158 158
57265- 6 6 6 14 14 14 50 50 50 14 14 14
57266- 2 2 6 2 2 6 2 2 6 2 2 6
57267- 2 2 6 6 6 6 86 86 86 46 46 46
57268- 18 18 18 6 6 6 0 0 0 0 0 0
57269- 0 0 0 0 0 0 0 0 0 0 0 0
57270- 0 0 0 0 0 0 0 0 0 0 0 0
57271- 0 0 0 0 0 0 0 0 0 0 0 0
57272- 0 0 0 0 0 0 0 0 0 0 0 0
57273- 0 0 0 0 0 0 0 0 0 0 0 0
57274- 0 0 0 0 0 0 0 0 0 0 0 0
57275- 0 0 0 0 0 0 0 0 0 0 0 0
57276- 0 0 0 0 0 0 0 0 0 6 6 6
57277- 22 22 22 54 54 54 70 70 70 2 2 6
57278- 2 2 6 10 10 10 2 2 6 22 22 22
57279-166 166 166 231 231 231 250 250 250 253 253 253
57280-253 253 253 253 253 253 253 253 253 250 250 250
57281-242 242 242 253 253 253 253 253 253 253 253 253
57282-253 253 253 253 253 253 253 253 253 253 253 253
57283-253 253 253 253 253 253 253 253 253 246 246 246
57284-231 231 231 206 206 206 198 198 198 226 226 226
57285- 94 94 94 2 2 6 6 6 6 38 38 38
57286- 30 30 30 2 2 6 2 2 6 2 2 6
57287- 2 2 6 2 2 6 62 62 62 66 66 66
57288- 26 26 26 10 10 10 0 0 0 0 0 0
57289- 0 0 0 0 0 0 0 0 0 0 0 0
57290- 0 0 0 0 0 0 0 0 0 0 0 0
57291- 0 0 0 0 0 0 0 0 0 0 0 0
57292- 0 0 0 0 0 0 0 0 0 0 0 0
57293- 0 0 0 0 0 0 0 0 0 0 0 0
57294- 0 0 0 0 0 0 0 0 0 0 0 0
57295- 0 0 0 0 0 0 0 0 0 0 0 0
57296- 0 0 0 0 0 0 0 0 0 10 10 10
57297- 30 30 30 74 74 74 50 50 50 2 2 6
57298- 26 26 26 26 26 26 2 2 6 106 106 106
57299-238 238 238 253 253 253 253 253 253 253 253 253
57300-253 253 253 253 253 253 253 253 253 253 253 253
57301-253 253 253 253 253 253 253 253 253 253 253 253
57302-253 253 253 253 253 253 253 253 253 253 253 253
57303-253 253 253 253 253 253 253 253 253 253 253 253
57304-253 253 253 246 246 246 218 218 218 202 202 202
57305-210 210 210 14 14 14 2 2 6 2 2 6
57306- 30 30 30 22 22 22 2 2 6 2 2 6
57307- 2 2 6 2 2 6 18 18 18 86 86 86
57308- 42 42 42 14 14 14 0 0 0 0 0 0
57309- 0 0 0 0 0 0 0 0 0 0 0 0
57310- 0 0 0 0 0 0 0 0 0 0 0 0
57311- 0 0 0 0 0 0 0 0 0 0 0 0
57312- 0 0 0 0 0 0 0 0 0 0 0 0
57313- 0 0 0 0 0 0 0 0 0 0 0 0
57314- 0 0 0 0 0 0 0 0 0 0 0 0
57315- 0 0 0 0 0 0 0 0 0 0 0 0
57316- 0 0 0 0 0 0 0 0 0 14 14 14
57317- 42 42 42 90 90 90 22 22 22 2 2 6
57318- 42 42 42 2 2 6 18 18 18 218 218 218
57319-253 253 253 253 253 253 253 253 253 253 253 253
57320-253 253 253 253 253 253 253 253 253 253 253 253
57321-253 253 253 253 253 253 253 253 253 253 253 253
57322-253 253 253 253 253 253 253 253 253 253 253 253
57323-253 253 253 253 253 253 253 253 253 253 253 253
57324-253 253 253 253 253 253 250 250 250 221 221 221
57325-218 218 218 101 101 101 2 2 6 14 14 14
57326- 18 18 18 38 38 38 10 10 10 2 2 6
57327- 2 2 6 2 2 6 2 2 6 78 78 78
57328- 58 58 58 22 22 22 6 6 6 0 0 0
57329- 0 0 0 0 0 0 0 0 0 0 0 0
57330- 0 0 0 0 0 0 0 0 0 0 0 0
57331- 0 0 0 0 0 0 0 0 0 0 0 0
57332- 0 0 0 0 0 0 0 0 0 0 0 0
57333- 0 0 0 0 0 0 0 0 0 0 0 0
57334- 0 0 0 0 0 0 0 0 0 0 0 0
57335- 0 0 0 0 0 0 0 0 0 0 0 0
57336- 0 0 0 0 0 0 6 6 6 18 18 18
57337- 54 54 54 82 82 82 2 2 6 26 26 26
57338- 22 22 22 2 2 6 123 123 123 253 253 253
57339-253 253 253 253 253 253 253 253 253 253 253 253
57340-253 253 253 253 253 253 253 253 253 253 253 253
57341-253 253 253 253 253 253 253 253 253 253 253 253
57342-253 253 253 253 253 253 253 253 253 253 253 253
57343-253 253 253 253 253 253 253 253 253 253 253 253
57344-253 253 253 253 253 253 253 253 253 250 250 250
57345-238 238 238 198 198 198 6 6 6 38 38 38
57346- 58 58 58 26 26 26 38 38 38 2 2 6
57347- 2 2 6 2 2 6 2 2 6 46 46 46
57348- 78 78 78 30 30 30 10 10 10 0 0 0
57349- 0 0 0 0 0 0 0 0 0 0 0 0
57350- 0 0 0 0 0 0 0 0 0 0 0 0
57351- 0 0 0 0 0 0 0 0 0 0 0 0
57352- 0 0 0 0 0 0 0 0 0 0 0 0
57353- 0 0 0 0 0 0 0 0 0 0 0 0
57354- 0 0 0 0 0 0 0 0 0 0 0 0
57355- 0 0 0 0 0 0 0 0 0 0 0 0
57356- 0 0 0 0 0 0 10 10 10 30 30 30
57357- 74 74 74 58 58 58 2 2 6 42 42 42
57358- 2 2 6 22 22 22 231 231 231 253 253 253
57359-253 253 253 253 253 253 253 253 253 253 253 253
57360-253 253 253 253 253 253 253 253 253 250 250 250
57361-253 253 253 253 253 253 253 253 253 253 253 253
57362-253 253 253 253 253 253 253 253 253 253 253 253
57363-253 253 253 253 253 253 253 253 253 253 253 253
57364-253 253 253 253 253 253 253 253 253 253 253 253
57365-253 253 253 246 246 246 46 46 46 38 38 38
57366- 42 42 42 14 14 14 38 38 38 14 14 14
57367- 2 2 6 2 2 6 2 2 6 6 6 6
57368- 86 86 86 46 46 46 14 14 14 0 0 0
57369- 0 0 0 0 0 0 0 0 0 0 0 0
57370- 0 0 0 0 0 0 0 0 0 0 0 0
57371- 0 0 0 0 0 0 0 0 0 0 0 0
57372- 0 0 0 0 0 0 0 0 0 0 0 0
57373- 0 0 0 0 0 0 0 0 0 0 0 0
57374- 0 0 0 0 0 0 0 0 0 0 0 0
57375- 0 0 0 0 0 0 0 0 0 0 0 0
57376- 0 0 0 6 6 6 14 14 14 42 42 42
57377- 90 90 90 18 18 18 18 18 18 26 26 26
57378- 2 2 6 116 116 116 253 253 253 253 253 253
57379-253 253 253 253 253 253 253 253 253 253 253 253
57380-253 253 253 253 253 253 250 250 250 238 238 238
57381-253 253 253 253 253 253 253 253 253 253 253 253
57382-253 253 253 253 253 253 253 253 253 253 253 253
57383-253 253 253 253 253 253 253 253 253 253 253 253
57384-253 253 253 253 253 253 253 253 253 253 253 253
57385-253 253 253 253 253 253 94 94 94 6 6 6
57386- 2 2 6 2 2 6 10 10 10 34 34 34
57387- 2 2 6 2 2 6 2 2 6 2 2 6
57388- 74 74 74 58 58 58 22 22 22 6 6 6
57389- 0 0 0 0 0 0 0 0 0 0 0 0
57390- 0 0 0 0 0 0 0 0 0 0 0 0
57391- 0 0 0 0 0 0 0 0 0 0 0 0
57392- 0 0 0 0 0 0 0 0 0 0 0 0
57393- 0 0 0 0 0 0 0 0 0 0 0 0
57394- 0 0 0 0 0 0 0 0 0 0 0 0
57395- 0 0 0 0 0 0 0 0 0 0 0 0
57396- 0 0 0 10 10 10 26 26 26 66 66 66
57397- 82 82 82 2 2 6 38 38 38 6 6 6
57398- 14 14 14 210 210 210 253 253 253 253 253 253
57399-253 253 253 253 253 253 253 253 253 253 253 253
57400-253 253 253 253 253 253 246 246 246 242 242 242
57401-253 253 253 253 253 253 253 253 253 253 253 253
57402-253 253 253 253 253 253 253 253 253 253 253 253
57403-253 253 253 253 253 253 253 253 253 253 253 253
57404-253 253 253 253 253 253 253 253 253 253 253 253
57405-253 253 253 253 253 253 144 144 144 2 2 6
57406- 2 2 6 2 2 6 2 2 6 46 46 46
57407- 2 2 6 2 2 6 2 2 6 2 2 6
57408- 42 42 42 74 74 74 30 30 30 10 10 10
57409- 0 0 0 0 0 0 0 0 0 0 0 0
57410- 0 0 0 0 0 0 0 0 0 0 0 0
57411- 0 0 0 0 0 0 0 0 0 0 0 0
57412- 0 0 0 0 0 0 0 0 0 0 0 0
57413- 0 0 0 0 0 0 0 0 0 0 0 0
57414- 0 0 0 0 0 0 0 0 0 0 0 0
57415- 0 0 0 0 0 0 0 0 0 0 0 0
57416- 6 6 6 14 14 14 42 42 42 90 90 90
57417- 26 26 26 6 6 6 42 42 42 2 2 6
57418- 74 74 74 250 250 250 253 253 253 253 253 253
57419-253 253 253 253 253 253 253 253 253 253 253 253
57420-253 253 253 253 253 253 242 242 242 242 242 242
57421-253 253 253 253 253 253 253 253 253 253 253 253
57422-253 253 253 253 253 253 253 253 253 253 253 253
57423-253 253 253 253 253 253 253 253 253 253 253 253
57424-253 253 253 253 253 253 253 253 253 253 253 253
57425-253 253 253 253 253 253 182 182 182 2 2 6
57426- 2 2 6 2 2 6 2 2 6 46 46 46
57427- 2 2 6 2 2 6 2 2 6 2 2 6
57428- 10 10 10 86 86 86 38 38 38 10 10 10
57429- 0 0 0 0 0 0 0 0 0 0 0 0
57430- 0 0 0 0 0 0 0 0 0 0 0 0
57431- 0 0 0 0 0 0 0 0 0 0 0 0
57432- 0 0 0 0 0 0 0 0 0 0 0 0
57433- 0 0 0 0 0 0 0 0 0 0 0 0
57434- 0 0 0 0 0 0 0 0 0 0 0 0
57435- 0 0 0 0 0 0 0 0 0 0 0 0
57436- 10 10 10 26 26 26 66 66 66 82 82 82
57437- 2 2 6 22 22 22 18 18 18 2 2 6
57438-149 149 149 253 253 253 253 253 253 253 253 253
57439-253 253 253 253 253 253 253 253 253 253 253 253
57440-253 253 253 253 253 253 234 234 234 242 242 242
57441-253 253 253 253 253 253 253 253 253 253 253 253
57442-253 253 253 253 253 253 253 253 253 253 253 253
57443-253 253 253 253 253 253 253 253 253 253 253 253
57444-253 253 253 253 253 253 253 253 253 253 253 253
57445-253 253 253 253 253 253 206 206 206 2 2 6
57446- 2 2 6 2 2 6 2 2 6 38 38 38
57447- 2 2 6 2 2 6 2 2 6 2 2 6
57448- 6 6 6 86 86 86 46 46 46 14 14 14
57449- 0 0 0 0 0 0 0 0 0 0 0 0
57450- 0 0 0 0 0 0 0 0 0 0 0 0
57451- 0 0 0 0 0 0 0 0 0 0 0 0
57452- 0 0 0 0 0 0 0 0 0 0 0 0
57453- 0 0 0 0 0 0 0 0 0 0 0 0
57454- 0 0 0 0 0 0 0 0 0 0 0 0
57455- 0 0 0 0 0 0 0 0 0 6 6 6
57456- 18 18 18 46 46 46 86 86 86 18 18 18
57457- 2 2 6 34 34 34 10 10 10 6 6 6
57458-210 210 210 253 253 253 253 253 253 253 253 253
57459-253 253 253 253 253 253 253 253 253 253 253 253
57460-253 253 253 253 253 253 234 234 234 242 242 242
57461-253 253 253 253 253 253 253 253 253 253 253 253
57462-253 253 253 253 253 253 253 253 253 253 253 253
57463-253 253 253 253 253 253 253 253 253 253 253 253
57464-253 253 253 253 253 253 253 253 253 253 253 253
57465-253 253 253 253 253 253 221 221 221 6 6 6
57466- 2 2 6 2 2 6 6 6 6 30 30 30
57467- 2 2 6 2 2 6 2 2 6 2 2 6
57468- 2 2 6 82 82 82 54 54 54 18 18 18
57469- 6 6 6 0 0 0 0 0 0 0 0 0
57470- 0 0 0 0 0 0 0 0 0 0 0 0
57471- 0 0 0 0 0 0 0 0 0 0 0 0
57472- 0 0 0 0 0 0 0 0 0 0 0 0
57473- 0 0 0 0 0 0 0 0 0 0 0 0
57474- 0 0 0 0 0 0 0 0 0 0 0 0
57475- 0 0 0 0 0 0 0 0 0 10 10 10
57476- 26 26 26 66 66 66 62 62 62 2 2 6
57477- 2 2 6 38 38 38 10 10 10 26 26 26
57478-238 238 238 253 253 253 253 253 253 253 253 253
57479-253 253 253 253 253 253 253 253 253 253 253 253
57480-253 253 253 253 253 253 231 231 231 238 238 238
57481-253 253 253 253 253 253 253 253 253 253 253 253
57482-253 253 253 253 253 253 253 253 253 253 253 253
57483-253 253 253 253 253 253 253 253 253 253 253 253
57484-253 253 253 253 253 253 253 253 253 253 253 253
57485-253 253 253 253 253 253 231 231 231 6 6 6
57486- 2 2 6 2 2 6 10 10 10 30 30 30
57487- 2 2 6 2 2 6 2 2 6 2 2 6
57488- 2 2 6 66 66 66 58 58 58 22 22 22
57489- 6 6 6 0 0 0 0 0 0 0 0 0
57490- 0 0 0 0 0 0 0 0 0 0 0 0
57491- 0 0 0 0 0 0 0 0 0 0 0 0
57492- 0 0 0 0 0 0 0 0 0 0 0 0
57493- 0 0 0 0 0 0 0 0 0 0 0 0
57494- 0 0 0 0 0 0 0 0 0 0 0 0
57495- 0 0 0 0 0 0 0 0 0 10 10 10
57496- 38 38 38 78 78 78 6 6 6 2 2 6
57497- 2 2 6 46 46 46 14 14 14 42 42 42
57498-246 246 246 253 253 253 253 253 253 253 253 253
57499-253 253 253 253 253 253 253 253 253 253 253 253
57500-253 253 253 253 253 253 231 231 231 242 242 242
57501-253 253 253 253 253 253 253 253 253 253 253 253
57502-253 253 253 253 253 253 253 253 253 253 253 253
57503-253 253 253 253 253 253 253 253 253 253 253 253
57504-253 253 253 253 253 253 253 253 253 253 253 253
57505-253 253 253 253 253 253 234 234 234 10 10 10
57506- 2 2 6 2 2 6 22 22 22 14 14 14
57507- 2 2 6 2 2 6 2 2 6 2 2 6
57508- 2 2 6 66 66 66 62 62 62 22 22 22
57509- 6 6 6 0 0 0 0 0 0 0 0 0
57510- 0 0 0 0 0 0 0 0 0 0 0 0
57511- 0 0 0 0 0 0 0 0 0 0 0 0
57512- 0 0 0 0 0 0 0 0 0 0 0 0
57513- 0 0 0 0 0 0 0 0 0 0 0 0
57514- 0 0 0 0 0 0 0 0 0 0 0 0
57515- 0 0 0 0 0 0 6 6 6 18 18 18
57516- 50 50 50 74 74 74 2 2 6 2 2 6
57517- 14 14 14 70 70 70 34 34 34 62 62 62
57518-250 250 250 253 253 253 253 253 253 253 253 253
57519-253 253 253 253 253 253 253 253 253 253 253 253
57520-253 253 253 253 253 253 231 231 231 246 246 246
57521-253 253 253 253 253 253 253 253 253 253 253 253
57522-253 253 253 253 253 253 253 253 253 253 253 253
57523-253 253 253 253 253 253 253 253 253 253 253 253
57524-253 253 253 253 253 253 253 253 253 253 253 253
57525-253 253 253 253 253 253 234 234 234 14 14 14
57526- 2 2 6 2 2 6 30 30 30 2 2 6
57527- 2 2 6 2 2 6 2 2 6 2 2 6
57528- 2 2 6 66 66 66 62 62 62 22 22 22
57529- 6 6 6 0 0 0 0 0 0 0 0 0
57530- 0 0 0 0 0 0 0 0 0 0 0 0
57531- 0 0 0 0 0 0 0 0 0 0 0 0
57532- 0 0 0 0 0 0 0 0 0 0 0 0
57533- 0 0 0 0 0 0 0 0 0 0 0 0
57534- 0 0 0 0 0 0 0 0 0 0 0 0
57535- 0 0 0 0 0 0 6 6 6 18 18 18
57536- 54 54 54 62 62 62 2 2 6 2 2 6
57537- 2 2 6 30 30 30 46 46 46 70 70 70
57538-250 250 250 253 253 253 253 253 253 253 253 253
57539-253 253 253 253 253 253 253 253 253 253 253 253
57540-253 253 253 253 253 253 231 231 231 246 246 246
57541-253 253 253 253 253 253 253 253 253 253 253 253
57542-253 253 253 253 253 253 253 253 253 253 253 253
57543-253 253 253 253 253 253 253 253 253 253 253 253
57544-253 253 253 253 253 253 253 253 253 253 253 253
57545-253 253 253 253 253 253 226 226 226 10 10 10
57546- 2 2 6 6 6 6 30 30 30 2 2 6
57547- 2 2 6 2 2 6 2 2 6 2 2 6
57548- 2 2 6 66 66 66 58 58 58 22 22 22
57549- 6 6 6 0 0 0 0 0 0 0 0 0
57550- 0 0 0 0 0 0 0 0 0 0 0 0
57551- 0 0 0 0 0 0 0 0 0 0 0 0
57552- 0 0 0 0 0 0 0 0 0 0 0 0
57553- 0 0 0 0 0 0 0 0 0 0 0 0
57554- 0 0 0 0 0 0 0 0 0 0 0 0
57555- 0 0 0 0 0 0 6 6 6 22 22 22
57556- 58 58 58 62 62 62 2 2 6 2 2 6
57557- 2 2 6 2 2 6 30 30 30 78 78 78
57558-250 250 250 253 253 253 253 253 253 253 253 253
57559-253 253 253 253 253 253 253 253 253 253 253 253
57560-253 253 253 253 253 253 231 231 231 246 246 246
57561-253 253 253 253 253 253 253 253 253 253 253 253
57562-253 253 253 253 253 253 253 253 253 253 253 253
57563-253 253 253 253 253 253 253 253 253 253 253 253
57564-253 253 253 253 253 253 253 253 253 253 253 253
57565-253 253 253 253 253 253 206 206 206 2 2 6
57566- 22 22 22 34 34 34 18 14 6 22 22 22
57567- 26 26 26 18 18 18 6 6 6 2 2 6
57568- 2 2 6 82 82 82 54 54 54 18 18 18
57569- 6 6 6 0 0 0 0 0 0 0 0 0
57570- 0 0 0 0 0 0 0 0 0 0 0 0
57571- 0 0 0 0 0 0 0 0 0 0 0 0
57572- 0 0 0 0 0 0 0 0 0 0 0 0
57573- 0 0 0 0 0 0 0 0 0 0 0 0
57574- 0 0 0 0 0 0 0 0 0 0 0 0
57575- 0 0 0 0 0 0 6 6 6 26 26 26
57576- 62 62 62 106 106 106 74 54 14 185 133 11
57577-210 162 10 121 92 8 6 6 6 62 62 62
57578-238 238 238 253 253 253 253 253 253 253 253 253
57579-253 253 253 253 253 253 253 253 253 253 253 253
57580-253 253 253 253 253 253 231 231 231 246 246 246
57581-253 253 253 253 253 253 253 253 253 253 253 253
57582-253 253 253 253 253 253 253 253 253 253 253 253
57583-253 253 253 253 253 253 253 253 253 253 253 253
57584-253 253 253 253 253 253 253 253 253 253 253 253
57585-253 253 253 253 253 253 158 158 158 18 18 18
57586- 14 14 14 2 2 6 2 2 6 2 2 6
57587- 6 6 6 18 18 18 66 66 66 38 38 38
57588- 6 6 6 94 94 94 50 50 50 18 18 18
57589- 6 6 6 0 0 0 0 0 0 0 0 0
57590- 0 0 0 0 0 0 0 0 0 0 0 0
57591- 0 0 0 0 0 0 0 0 0 0 0 0
57592- 0 0 0 0 0 0 0 0 0 0 0 0
57593- 0 0 0 0 0 0 0 0 0 0 0 0
57594- 0 0 0 0 0 0 0 0 0 6 6 6
57595- 10 10 10 10 10 10 18 18 18 38 38 38
57596- 78 78 78 142 134 106 216 158 10 242 186 14
57597-246 190 14 246 190 14 156 118 10 10 10 10
57598- 90 90 90 238 238 238 253 253 253 253 253 253
57599-253 253 253 253 253 253 253 253 253 253 253 253
57600-253 253 253 253 253 253 231 231 231 250 250 250
57601-253 253 253 253 253 253 253 253 253 253 253 253
57602-253 253 253 253 253 253 253 253 253 253 253 253
57603-253 253 253 253 253 253 253 253 253 253 253 253
57604-253 253 253 253 253 253 253 253 253 246 230 190
57605-238 204 91 238 204 91 181 142 44 37 26 9
57606- 2 2 6 2 2 6 2 2 6 2 2 6
57607- 2 2 6 2 2 6 38 38 38 46 46 46
57608- 26 26 26 106 106 106 54 54 54 18 18 18
57609- 6 6 6 0 0 0 0 0 0 0 0 0
57610- 0 0 0 0 0 0 0 0 0 0 0 0
57611- 0 0 0 0 0 0 0 0 0 0 0 0
57612- 0 0 0 0 0 0 0 0 0 0 0 0
57613- 0 0 0 0 0 0 0 0 0 0 0 0
57614- 0 0 0 6 6 6 14 14 14 22 22 22
57615- 30 30 30 38 38 38 50 50 50 70 70 70
57616-106 106 106 190 142 34 226 170 11 242 186 14
57617-246 190 14 246 190 14 246 190 14 154 114 10
57618- 6 6 6 74 74 74 226 226 226 253 253 253
57619-253 253 253 253 253 253 253 253 253 253 253 253
57620-253 253 253 253 253 253 231 231 231 250 250 250
57621-253 253 253 253 253 253 253 253 253 253 253 253
57622-253 253 253 253 253 253 253 253 253 253 253 253
57623-253 253 253 253 253 253 253 253 253 253 253 253
57624-253 253 253 253 253 253 253 253 253 228 184 62
57625-241 196 14 241 208 19 232 195 16 38 30 10
57626- 2 2 6 2 2 6 2 2 6 2 2 6
57627- 2 2 6 6 6 6 30 30 30 26 26 26
57628-203 166 17 154 142 90 66 66 66 26 26 26
57629- 6 6 6 0 0 0 0 0 0 0 0 0
57630- 0 0 0 0 0 0 0 0 0 0 0 0
57631- 0 0 0 0 0 0 0 0 0 0 0 0
57632- 0 0 0 0 0 0 0 0 0 0 0 0
57633- 0 0 0 0 0 0 0 0 0 0 0 0
57634- 6 6 6 18 18 18 38 38 38 58 58 58
57635- 78 78 78 86 86 86 101 101 101 123 123 123
57636-175 146 61 210 150 10 234 174 13 246 186 14
57637-246 190 14 246 190 14 246 190 14 238 190 10
57638-102 78 10 2 2 6 46 46 46 198 198 198
57639-253 253 253 253 253 253 253 253 253 253 253 253
57640-253 253 253 253 253 253 234 234 234 242 242 242
57641-253 253 253 253 253 253 253 253 253 253 253 253
57642-253 253 253 253 253 253 253 253 253 253 253 253
57643-253 253 253 253 253 253 253 253 253 253 253 253
57644-253 253 253 253 253 253 253 253 253 224 178 62
57645-242 186 14 241 196 14 210 166 10 22 18 6
57646- 2 2 6 2 2 6 2 2 6 2 2 6
57647- 2 2 6 2 2 6 6 6 6 121 92 8
57648-238 202 15 232 195 16 82 82 82 34 34 34
57649- 10 10 10 0 0 0 0 0 0 0 0 0
57650- 0 0 0 0 0 0 0 0 0 0 0 0
57651- 0 0 0 0 0 0 0 0 0 0 0 0
57652- 0 0 0 0 0 0 0 0 0 0 0 0
57653- 0 0 0 0 0 0 0 0 0 0 0 0
57654- 14 14 14 38 38 38 70 70 70 154 122 46
57655-190 142 34 200 144 11 197 138 11 197 138 11
57656-213 154 11 226 170 11 242 186 14 246 190 14
57657-246 190 14 246 190 14 246 190 14 246 190 14
57658-225 175 15 46 32 6 2 2 6 22 22 22
57659-158 158 158 250 250 250 253 253 253 253 253 253
57660-253 253 253 253 253 253 253 253 253 253 253 253
57661-253 253 253 253 253 253 253 253 253 253 253 253
57662-253 253 253 253 253 253 253 253 253 253 253 253
57663-253 253 253 253 253 253 253 253 253 253 253 253
57664-253 253 253 250 250 250 242 242 242 224 178 62
57665-239 182 13 236 186 11 213 154 11 46 32 6
57666- 2 2 6 2 2 6 2 2 6 2 2 6
57667- 2 2 6 2 2 6 61 42 6 225 175 15
57668-238 190 10 236 186 11 112 100 78 42 42 42
57669- 14 14 14 0 0 0 0 0 0 0 0 0
57670- 0 0 0 0 0 0 0 0 0 0 0 0
57671- 0 0 0 0 0 0 0 0 0 0 0 0
57672- 0 0 0 0 0 0 0 0 0 0 0 0
57673- 0 0 0 0 0 0 0 0 0 6 6 6
57674- 22 22 22 54 54 54 154 122 46 213 154 11
57675-226 170 11 230 174 11 226 170 11 226 170 11
57676-236 178 12 242 186 14 246 190 14 246 190 14
57677-246 190 14 246 190 14 246 190 14 246 190 14
57678-241 196 14 184 144 12 10 10 10 2 2 6
57679- 6 6 6 116 116 116 242 242 242 253 253 253
57680-253 253 253 253 253 253 253 253 253 253 253 253
57681-253 253 253 253 253 253 253 253 253 253 253 253
57682-253 253 253 253 253 253 253 253 253 253 253 253
57683-253 253 253 253 253 253 253 253 253 253 253 253
57684-253 253 253 231 231 231 198 198 198 214 170 54
57685-236 178 12 236 178 12 210 150 10 137 92 6
57686- 18 14 6 2 2 6 2 2 6 2 2 6
57687- 6 6 6 70 47 6 200 144 11 236 178 12
57688-239 182 13 239 182 13 124 112 88 58 58 58
57689- 22 22 22 6 6 6 0 0 0 0 0 0
57690- 0 0 0 0 0 0 0 0 0 0 0 0
57691- 0 0 0 0 0 0 0 0 0 0 0 0
57692- 0 0 0 0 0 0 0 0 0 0 0 0
57693- 0 0 0 0 0 0 0 0 0 10 10 10
57694- 30 30 30 70 70 70 180 133 36 226 170 11
57695-239 182 13 242 186 14 242 186 14 246 186 14
57696-246 190 14 246 190 14 246 190 14 246 190 14
57697-246 190 14 246 190 14 246 190 14 246 190 14
57698-246 190 14 232 195 16 98 70 6 2 2 6
57699- 2 2 6 2 2 6 66 66 66 221 221 221
57700-253 253 253 253 253 253 253 253 253 253 253 253
57701-253 253 253 253 253 253 253 253 253 253 253 253
57702-253 253 253 253 253 253 253 253 253 253 253 253
57703-253 253 253 253 253 253 253 253 253 253 253 253
57704-253 253 253 206 206 206 198 198 198 214 166 58
57705-230 174 11 230 174 11 216 158 10 192 133 9
57706-163 110 8 116 81 8 102 78 10 116 81 8
57707-167 114 7 197 138 11 226 170 11 239 182 13
57708-242 186 14 242 186 14 162 146 94 78 78 78
57709- 34 34 34 14 14 14 6 6 6 0 0 0
57710- 0 0 0 0 0 0 0 0 0 0 0 0
57711- 0 0 0 0 0 0 0 0 0 0 0 0
57712- 0 0 0 0 0 0 0 0 0 0 0 0
57713- 0 0 0 0 0 0 0 0 0 6 6 6
57714- 30 30 30 78 78 78 190 142 34 226 170 11
57715-239 182 13 246 190 14 246 190 14 246 190 14
57716-246 190 14 246 190 14 246 190 14 246 190 14
57717-246 190 14 246 190 14 246 190 14 246 190 14
57718-246 190 14 241 196 14 203 166 17 22 18 6
57719- 2 2 6 2 2 6 2 2 6 38 38 38
57720-218 218 218 253 253 253 253 253 253 253 253 253
57721-253 253 253 253 253 253 253 253 253 253 253 253
57722-253 253 253 253 253 253 253 253 253 253 253 253
57723-253 253 253 253 253 253 253 253 253 253 253 253
57724-250 250 250 206 206 206 198 198 198 202 162 69
57725-226 170 11 236 178 12 224 166 10 210 150 10
57726-200 144 11 197 138 11 192 133 9 197 138 11
57727-210 150 10 226 170 11 242 186 14 246 190 14
57728-246 190 14 246 186 14 225 175 15 124 112 88
57729- 62 62 62 30 30 30 14 14 14 6 6 6
57730- 0 0 0 0 0 0 0 0 0 0 0 0
57731- 0 0 0 0 0 0 0 0 0 0 0 0
57732- 0 0 0 0 0 0 0 0 0 0 0 0
57733- 0 0 0 0 0 0 0 0 0 10 10 10
57734- 30 30 30 78 78 78 174 135 50 224 166 10
57735-239 182 13 246 190 14 246 190 14 246 190 14
57736-246 190 14 246 190 14 246 190 14 246 190 14
57737-246 190 14 246 190 14 246 190 14 246 190 14
57738-246 190 14 246 190 14 241 196 14 139 102 15
57739- 2 2 6 2 2 6 2 2 6 2 2 6
57740- 78 78 78 250 250 250 253 253 253 253 253 253
57741-253 253 253 253 253 253 253 253 253 253 253 253
57742-253 253 253 253 253 253 253 253 253 253 253 253
57743-253 253 253 253 253 253 253 253 253 253 253 253
57744-250 250 250 214 214 214 198 198 198 190 150 46
57745-219 162 10 236 178 12 234 174 13 224 166 10
57746-216 158 10 213 154 11 213 154 11 216 158 10
57747-226 170 11 239 182 13 246 190 14 246 190 14
57748-246 190 14 246 190 14 242 186 14 206 162 42
57749-101 101 101 58 58 58 30 30 30 14 14 14
57750- 6 6 6 0 0 0 0 0 0 0 0 0
57751- 0 0 0 0 0 0 0 0 0 0 0 0
57752- 0 0 0 0 0 0 0 0 0 0 0 0
57753- 0 0 0 0 0 0 0 0 0 10 10 10
57754- 30 30 30 74 74 74 174 135 50 216 158 10
57755-236 178 12 246 190 14 246 190 14 246 190 14
57756-246 190 14 246 190 14 246 190 14 246 190 14
57757-246 190 14 246 190 14 246 190 14 246 190 14
57758-246 190 14 246 190 14 241 196 14 226 184 13
57759- 61 42 6 2 2 6 2 2 6 2 2 6
57760- 22 22 22 238 238 238 253 253 253 253 253 253
57761-253 253 253 253 253 253 253 253 253 253 253 253
57762-253 253 253 253 253 253 253 253 253 253 253 253
57763-253 253 253 253 253 253 253 253 253 253 253 253
57764-253 253 253 226 226 226 187 187 187 180 133 36
57765-216 158 10 236 178 12 239 182 13 236 178 12
57766-230 174 11 226 170 11 226 170 11 230 174 11
57767-236 178 12 242 186 14 246 190 14 246 190 14
57768-246 190 14 246 190 14 246 186 14 239 182 13
57769-206 162 42 106 106 106 66 66 66 34 34 34
57770- 14 14 14 6 6 6 0 0 0 0 0 0
57771- 0 0 0 0 0 0 0 0 0 0 0 0
57772- 0 0 0 0 0 0 0 0 0 0 0 0
57773- 0 0 0 0 0 0 0 0 0 6 6 6
57774- 26 26 26 70 70 70 163 133 67 213 154 11
57775-236 178 12 246 190 14 246 190 14 246 190 14
57776-246 190 14 246 190 14 246 190 14 246 190 14
57777-246 190 14 246 190 14 246 190 14 246 190 14
57778-246 190 14 246 190 14 246 190 14 241 196 14
57779-190 146 13 18 14 6 2 2 6 2 2 6
57780- 46 46 46 246 246 246 253 253 253 253 253 253
57781-253 253 253 253 253 253 253 253 253 253 253 253
57782-253 253 253 253 253 253 253 253 253 253 253 253
57783-253 253 253 253 253 253 253 253 253 253 253 253
57784-253 253 253 221 221 221 86 86 86 156 107 11
57785-216 158 10 236 178 12 242 186 14 246 186 14
57786-242 186 14 239 182 13 239 182 13 242 186 14
57787-242 186 14 246 186 14 246 190 14 246 190 14
57788-246 190 14 246 190 14 246 190 14 246 190 14
57789-242 186 14 225 175 15 142 122 72 66 66 66
57790- 30 30 30 10 10 10 0 0 0 0 0 0
57791- 0 0 0 0 0 0 0 0 0 0 0 0
57792- 0 0 0 0 0 0 0 0 0 0 0 0
57793- 0 0 0 0 0 0 0 0 0 6 6 6
57794- 26 26 26 70 70 70 163 133 67 210 150 10
57795-236 178 12 246 190 14 246 190 14 246 190 14
57796-246 190 14 246 190 14 246 190 14 246 190 14
57797-246 190 14 246 190 14 246 190 14 246 190 14
57798-246 190 14 246 190 14 246 190 14 246 190 14
57799-232 195 16 121 92 8 34 34 34 106 106 106
57800-221 221 221 253 253 253 253 253 253 253 253 253
57801-253 253 253 253 253 253 253 253 253 253 253 253
57802-253 253 253 253 253 253 253 253 253 253 253 253
57803-253 253 253 253 253 253 253 253 253 253 253 253
57804-242 242 242 82 82 82 18 14 6 163 110 8
57805-216 158 10 236 178 12 242 186 14 246 190 14
57806-246 190 14 246 190 14 246 190 14 246 190 14
57807-246 190 14 246 190 14 246 190 14 246 190 14
57808-246 190 14 246 190 14 246 190 14 246 190 14
57809-246 190 14 246 190 14 242 186 14 163 133 67
57810- 46 46 46 18 18 18 6 6 6 0 0 0
57811- 0 0 0 0 0 0 0 0 0 0 0 0
57812- 0 0 0 0 0 0 0 0 0 0 0 0
57813- 0 0 0 0 0 0 0 0 0 10 10 10
57814- 30 30 30 78 78 78 163 133 67 210 150 10
57815-236 178 12 246 186 14 246 190 14 246 190 14
57816-246 190 14 246 190 14 246 190 14 246 190 14
57817-246 190 14 246 190 14 246 190 14 246 190 14
57818-246 190 14 246 190 14 246 190 14 246 190 14
57819-241 196 14 215 174 15 190 178 144 253 253 253
57820-253 253 253 253 253 253 253 253 253 253 253 253
57821-253 253 253 253 253 253 253 253 253 253 253 253
57822-253 253 253 253 253 253 253 253 253 253 253 253
57823-253 253 253 253 253 253 253 253 253 218 218 218
57824- 58 58 58 2 2 6 22 18 6 167 114 7
57825-216 158 10 236 178 12 246 186 14 246 190 14
57826-246 190 14 246 190 14 246 190 14 246 190 14
57827-246 190 14 246 190 14 246 190 14 246 190 14
57828-246 190 14 246 190 14 246 190 14 246 190 14
57829-246 190 14 246 186 14 242 186 14 190 150 46
57830- 54 54 54 22 22 22 6 6 6 0 0 0
57831- 0 0 0 0 0 0 0 0 0 0 0 0
57832- 0 0 0 0 0 0 0 0 0 0 0 0
57833- 0 0 0 0 0 0 0 0 0 14 14 14
57834- 38 38 38 86 86 86 180 133 36 213 154 11
57835-236 178 12 246 186 14 246 190 14 246 190 14
57836-246 190 14 246 190 14 246 190 14 246 190 14
57837-246 190 14 246 190 14 246 190 14 246 190 14
57838-246 190 14 246 190 14 246 190 14 246 190 14
57839-246 190 14 232 195 16 190 146 13 214 214 214
57840-253 253 253 253 253 253 253 253 253 253 253 253
57841-253 253 253 253 253 253 253 253 253 253 253 253
57842-253 253 253 253 253 253 253 253 253 253 253 253
57843-253 253 253 250 250 250 170 170 170 26 26 26
57844- 2 2 6 2 2 6 37 26 9 163 110 8
57845-219 162 10 239 182 13 246 186 14 246 190 14
57846-246 190 14 246 190 14 246 190 14 246 190 14
57847-246 190 14 246 190 14 246 190 14 246 190 14
57848-246 190 14 246 190 14 246 190 14 246 190 14
57849-246 186 14 236 178 12 224 166 10 142 122 72
57850- 46 46 46 18 18 18 6 6 6 0 0 0
57851- 0 0 0 0 0 0 0 0 0 0 0 0
57852- 0 0 0 0 0 0 0 0 0 0 0 0
57853- 0 0 0 0 0 0 6 6 6 18 18 18
57854- 50 50 50 109 106 95 192 133 9 224 166 10
57855-242 186 14 246 190 14 246 190 14 246 190 14
57856-246 190 14 246 190 14 246 190 14 246 190 14
57857-246 190 14 246 190 14 246 190 14 246 190 14
57858-246 190 14 246 190 14 246 190 14 246 190 14
57859-242 186 14 226 184 13 210 162 10 142 110 46
57860-226 226 226 253 253 253 253 253 253 253 253 253
57861-253 253 253 253 253 253 253 253 253 253 253 253
57862-253 253 253 253 253 253 253 253 253 253 253 253
57863-198 198 198 66 66 66 2 2 6 2 2 6
57864- 2 2 6 2 2 6 50 34 6 156 107 11
57865-219 162 10 239 182 13 246 186 14 246 190 14
57866-246 190 14 246 190 14 246 190 14 246 190 14
57867-246 190 14 246 190 14 246 190 14 246 190 14
57868-246 190 14 246 190 14 246 190 14 242 186 14
57869-234 174 13 213 154 11 154 122 46 66 66 66
57870- 30 30 30 10 10 10 0 0 0 0 0 0
57871- 0 0 0 0 0 0 0 0 0 0 0 0
57872- 0 0 0 0 0 0 0 0 0 0 0 0
57873- 0 0 0 0 0 0 6 6 6 22 22 22
57874- 58 58 58 154 121 60 206 145 10 234 174 13
57875-242 186 14 246 186 14 246 190 14 246 190 14
57876-246 190 14 246 190 14 246 190 14 246 190 14
57877-246 190 14 246 190 14 246 190 14 246 190 14
57878-246 190 14 246 190 14 246 190 14 246 190 14
57879-246 186 14 236 178 12 210 162 10 163 110 8
57880- 61 42 6 138 138 138 218 218 218 250 250 250
57881-253 253 253 253 253 253 253 253 253 250 250 250
57882-242 242 242 210 210 210 144 144 144 66 66 66
57883- 6 6 6 2 2 6 2 2 6 2 2 6
57884- 2 2 6 2 2 6 61 42 6 163 110 8
57885-216 158 10 236 178 12 246 190 14 246 190 14
57886-246 190 14 246 190 14 246 190 14 246 190 14
57887-246 190 14 246 190 14 246 190 14 246 190 14
57888-246 190 14 239 182 13 230 174 11 216 158 10
57889-190 142 34 124 112 88 70 70 70 38 38 38
57890- 18 18 18 6 6 6 0 0 0 0 0 0
57891- 0 0 0 0 0 0 0 0 0 0 0 0
57892- 0 0 0 0 0 0 0 0 0 0 0 0
57893- 0 0 0 0 0 0 6 6 6 22 22 22
57894- 62 62 62 168 124 44 206 145 10 224 166 10
57895-236 178 12 239 182 13 242 186 14 242 186 14
57896-246 186 14 246 190 14 246 190 14 246 190 14
57897-246 190 14 246 190 14 246 190 14 246 190 14
57898-246 190 14 246 190 14 246 190 14 246 190 14
57899-246 190 14 236 178 12 216 158 10 175 118 6
57900- 80 54 7 2 2 6 6 6 6 30 30 30
57901- 54 54 54 62 62 62 50 50 50 38 38 38
57902- 14 14 14 2 2 6 2 2 6 2 2 6
57903- 2 2 6 2 2 6 2 2 6 2 2 6
57904- 2 2 6 6 6 6 80 54 7 167 114 7
57905-213 154 11 236 178 12 246 190 14 246 190 14
57906-246 190 14 246 190 14 246 190 14 246 190 14
57907-246 190 14 242 186 14 239 182 13 239 182 13
57908-230 174 11 210 150 10 174 135 50 124 112 88
57909- 82 82 82 54 54 54 34 34 34 18 18 18
57910- 6 6 6 0 0 0 0 0 0 0 0 0
57911- 0 0 0 0 0 0 0 0 0 0 0 0
57912- 0 0 0 0 0 0 0 0 0 0 0 0
57913- 0 0 0 0 0 0 6 6 6 18 18 18
57914- 50 50 50 158 118 36 192 133 9 200 144 11
57915-216 158 10 219 162 10 224 166 10 226 170 11
57916-230 174 11 236 178 12 239 182 13 239 182 13
57917-242 186 14 246 186 14 246 190 14 246 190 14
57918-246 190 14 246 190 14 246 190 14 246 190 14
57919-246 186 14 230 174 11 210 150 10 163 110 8
57920-104 69 6 10 10 10 2 2 6 2 2 6
57921- 2 2 6 2 2 6 2 2 6 2 2 6
57922- 2 2 6 2 2 6 2 2 6 2 2 6
57923- 2 2 6 2 2 6 2 2 6 2 2 6
57924- 2 2 6 6 6 6 91 60 6 167 114 7
57925-206 145 10 230 174 11 242 186 14 246 190 14
57926-246 190 14 246 190 14 246 186 14 242 186 14
57927-239 182 13 230 174 11 224 166 10 213 154 11
57928-180 133 36 124 112 88 86 86 86 58 58 58
57929- 38 38 38 22 22 22 10 10 10 6 6 6
57930- 0 0 0 0 0 0 0 0 0 0 0 0
57931- 0 0 0 0 0 0 0 0 0 0 0 0
57932- 0 0 0 0 0 0 0 0 0 0 0 0
57933- 0 0 0 0 0 0 0 0 0 14 14 14
57934- 34 34 34 70 70 70 138 110 50 158 118 36
57935-167 114 7 180 123 7 192 133 9 197 138 11
57936-200 144 11 206 145 10 213 154 11 219 162 10
57937-224 166 10 230 174 11 239 182 13 242 186 14
57938-246 186 14 246 186 14 246 186 14 246 186 14
57939-239 182 13 216 158 10 185 133 11 152 99 6
57940-104 69 6 18 14 6 2 2 6 2 2 6
57941- 2 2 6 2 2 6 2 2 6 2 2 6
57942- 2 2 6 2 2 6 2 2 6 2 2 6
57943- 2 2 6 2 2 6 2 2 6 2 2 6
57944- 2 2 6 6 6 6 80 54 7 152 99 6
57945-192 133 9 219 162 10 236 178 12 239 182 13
57946-246 186 14 242 186 14 239 182 13 236 178 12
57947-224 166 10 206 145 10 192 133 9 154 121 60
57948- 94 94 94 62 62 62 42 42 42 22 22 22
57949- 14 14 14 6 6 6 0 0 0 0 0 0
57950- 0 0 0 0 0 0 0 0 0 0 0 0
57951- 0 0 0 0 0 0 0 0 0 0 0 0
57952- 0 0 0 0 0 0 0 0 0 0 0 0
57953- 0 0 0 0 0 0 0 0 0 6 6 6
57954- 18 18 18 34 34 34 58 58 58 78 78 78
57955-101 98 89 124 112 88 142 110 46 156 107 11
57956-163 110 8 167 114 7 175 118 6 180 123 7
57957-185 133 11 197 138 11 210 150 10 219 162 10
57958-226 170 11 236 178 12 236 178 12 234 174 13
57959-219 162 10 197 138 11 163 110 8 130 83 6
57960- 91 60 6 10 10 10 2 2 6 2 2 6
57961- 18 18 18 38 38 38 38 38 38 38 38 38
57962- 38 38 38 38 38 38 38 38 38 38 38 38
57963- 38 38 38 38 38 38 26 26 26 2 2 6
57964- 2 2 6 6 6 6 70 47 6 137 92 6
57965-175 118 6 200 144 11 219 162 10 230 174 11
57966-234 174 13 230 174 11 219 162 10 210 150 10
57967-192 133 9 163 110 8 124 112 88 82 82 82
57968- 50 50 50 30 30 30 14 14 14 6 6 6
57969- 0 0 0 0 0 0 0 0 0 0 0 0
57970- 0 0 0 0 0 0 0 0 0 0 0 0
57971- 0 0 0 0 0 0 0 0 0 0 0 0
57972- 0 0 0 0 0 0 0 0 0 0 0 0
57973- 0 0 0 0 0 0 0 0 0 0 0 0
57974- 6 6 6 14 14 14 22 22 22 34 34 34
57975- 42 42 42 58 58 58 74 74 74 86 86 86
57976-101 98 89 122 102 70 130 98 46 121 87 25
57977-137 92 6 152 99 6 163 110 8 180 123 7
57978-185 133 11 197 138 11 206 145 10 200 144 11
57979-180 123 7 156 107 11 130 83 6 104 69 6
57980- 50 34 6 54 54 54 110 110 110 101 98 89
57981- 86 86 86 82 82 82 78 78 78 78 78 78
57982- 78 78 78 78 78 78 78 78 78 78 78 78
57983- 78 78 78 82 82 82 86 86 86 94 94 94
57984-106 106 106 101 101 101 86 66 34 124 80 6
57985-156 107 11 180 123 7 192 133 9 200 144 11
57986-206 145 10 200 144 11 192 133 9 175 118 6
57987-139 102 15 109 106 95 70 70 70 42 42 42
57988- 22 22 22 10 10 10 0 0 0 0 0 0
57989- 0 0 0 0 0 0 0 0 0 0 0 0
57990- 0 0 0 0 0 0 0 0 0 0 0 0
57991- 0 0 0 0 0 0 0 0 0 0 0 0
57992- 0 0 0 0 0 0 0 0 0 0 0 0
57993- 0 0 0 0 0 0 0 0 0 0 0 0
57994- 0 0 0 0 0 0 6 6 6 10 10 10
57995- 14 14 14 22 22 22 30 30 30 38 38 38
57996- 50 50 50 62 62 62 74 74 74 90 90 90
57997-101 98 89 112 100 78 121 87 25 124 80 6
57998-137 92 6 152 99 6 152 99 6 152 99 6
57999-138 86 6 124 80 6 98 70 6 86 66 30
58000-101 98 89 82 82 82 58 58 58 46 46 46
58001- 38 38 38 34 34 34 34 34 34 34 34 34
58002- 34 34 34 34 34 34 34 34 34 34 34 34
58003- 34 34 34 34 34 34 38 38 38 42 42 42
58004- 54 54 54 82 82 82 94 86 76 91 60 6
58005-134 86 6 156 107 11 167 114 7 175 118 6
58006-175 118 6 167 114 7 152 99 6 121 87 25
58007-101 98 89 62 62 62 34 34 34 18 18 18
58008- 6 6 6 0 0 0 0 0 0 0 0 0
58009- 0 0 0 0 0 0 0 0 0 0 0 0
58010- 0 0 0 0 0 0 0 0 0 0 0 0
58011- 0 0 0 0 0 0 0 0 0 0 0 0
58012- 0 0 0 0 0 0 0 0 0 0 0 0
58013- 0 0 0 0 0 0 0 0 0 0 0 0
58014- 0 0 0 0 0 0 0 0 0 0 0 0
58015- 0 0 0 6 6 6 6 6 6 10 10 10
58016- 18 18 18 22 22 22 30 30 30 42 42 42
58017- 50 50 50 66 66 66 86 86 86 101 98 89
58018-106 86 58 98 70 6 104 69 6 104 69 6
58019-104 69 6 91 60 6 82 62 34 90 90 90
58020- 62 62 62 38 38 38 22 22 22 14 14 14
58021- 10 10 10 10 10 10 10 10 10 10 10 10
58022- 10 10 10 10 10 10 6 6 6 10 10 10
58023- 10 10 10 10 10 10 10 10 10 14 14 14
58024- 22 22 22 42 42 42 70 70 70 89 81 66
58025- 80 54 7 104 69 6 124 80 6 137 92 6
58026-134 86 6 116 81 8 100 82 52 86 86 86
58027- 58 58 58 30 30 30 14 14 14 6 6 6
58028- 0 0 0 0 0 0 0 0 0 0 0 0
58029- 0 0 0 0 0 0 0 0 0 0 0 0
58030- 0 0 0 0 0 0 0 0 0 0 0 0
58031- 0 0 0 0 0 0 0 0 0 0 0 0
58032- 0 0 0 0 0 0 0 0 0 0 0 0
58033- 0 0 0 0 0 0 0 0 0 0 0 0
58034- 0 0 0 0 0 0 0 0 0 0 0 0
58035- 0 0 0 0 0 0 0 0 0 0 0 0
58036- 0 0 0 6 6 6 10 10 10 14 14 14
58037- 18 18 18 26 26 26 38 38 38 54 54 54
58038- 70 70 70 86 86 86 94 86 76 89 81 66
58039- 89 81 66 86 86 86 74 74 74 50 50 50
58040- 30 30 30 14 14 14 6 6 6 0 0 0
58041- 0 0 0 0 0 0 0 0 0 0 0 0
58042- 0 0 0 0 0 0 0 0 0 0 0 0
58043- 0 0 0 0 0 0 0 0 0 0 0 0
58044- 6 6 6 18 18 18 34 34 34 58 58 58
58045- 82 82 82 89 81 66 89 81 66 89 81 66
58046- 94 86 66 94 86 76 74 74 74 50 50 50
58047- 26 26 26 14 14 14 6 6 6 0 0 0
58048- 0 0 0 0 0 0 0 0 0 0 0 0
58049- 0 0 0 0 0 0 0 0 0 0 0 0
58050- 0 0 0 0 0 0 0 0 0 0 0 0
58051- 0 0 0 0 0 0 0 0 0 0 0 0
58052- 0 0 0 0 0 0 0 0 0 0 0 0
58053- 0 0 0 0 0 0 0 0 0 0 0 0
58054- 0 0 0 0 0 0 0 0 0 0 0 0
58055- 0 0 0 0 0 0 0 0 0 0 0 0
58056- 0 0 0 0 0 0 0 0 0 0 0 0
58057- 6 6 6 6 6 6 14 14 14 18 18 18
58058- 30 30 30 38 38 38 46 46 46 54 54 54
58059- 50 50 50 42 42 42 30 30 30 18 18 18
58060- 10 10 10 0 0 0 0 0 0 0 0 0
58061- 0 0 0 0 0 0 0 0 0 0 0 0
58062- 0 0 0 0 0 0 0 0 0 0 0 0
58063- 0 0 0 0 0 0 0 0 0 0 0 0
58064- 0 0 0 6 6 6 14 14 14 26 26 26
58065- 38 38 38 50 50 50 58 58 58 58 58 58
58066- 54 54 54 42 42 42 30 30 30 18 18 18
58067- 10 10 10 0 0 0 0 0 0 0 0 0
58068- 0 0 0 0 0 0 0 0 0 0 0 0
58069- 0 0 0 0 0 0 0 0 0 0 0 0
58070- 0 0 0 0 0 0 0 0 0 0 0 0
58071- 0 0 0 0 0 0 0 0 0 0 0 0
58072- 0 0 0 0 0 0 0 0 0 0 0 0
58073- 0 0 0 0 0 0 0 0 0 0 0 0
58074- 0 0 0 0 0 0 0 0 0 0 0 0
58075- 0 0 0 0 0 0 0 0 0 0 0 0
58076- 0 0 0 0 0 0 0 0 0 0 0 0
58077- 0 0 0 0 0 0 0 0 0 6 6 6
58078- 6 6 6 10 10 10 14 14 14 18 18 18
58079- 18 18 18 14 14 14 10 10 10 6 6 6
58080- 0 0 0 0 0 0 0 0 0 0 0 0
58081- 0 0 0 0 0 0 0 0 0 0 0 0
58082- 0 0 0 0 0 0 0 0 0 0 0 0
58083- 0 0 0 0 0 0 0 0 0 0 0 0
58084- 0 0 0 0 0 0 0 0 0 6 6 6
58085- 14 14 14 18 18 18 22 22 22 22 22 22
58086- 18 18 18 14 14 14 10 10 10 6 6 6
58087- 0 0 0 0 0 0 0 0 0 0 0 0
58088- 0 0 0 0 0 0 0 0 0 0 0 0
58089- 0 0 0 0 0 0 0 0 0 0 0 0
58090- 0 0 0 0 0 0 0 0 0 0 0 0
58091- 0 0 0 0 0 0 0 0 0 0 0 0
+ [pixel rows of the replacement boot-logo image: ASCII "R G B" triplets, elided]
58451+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58452+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58453+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58454+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58455+4 4 4 4 4 4
58456+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58457+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58458+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58459+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58460+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58461+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58462+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58463+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58464+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58465+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58466+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58467+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58468+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58469+4 4 4 4 4 4
58470+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58471+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58472+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58473+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58474+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58475+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58476+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58477+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58478+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58479+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58480+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58481+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58482+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58483+5 5 5 5 5 5
58484+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58485+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58486+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58487+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58488+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58489+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58490+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58491+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58492+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58493+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58494+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58495+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58496+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58497+5 5 5 4 4 4
58498+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58499+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58500+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58501+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58502+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58503+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58504+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58505+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58506+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58507+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58508+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58509+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58511+4 4 4 4 4 4
58512+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58513+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58514+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58515+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
58516+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
58517+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58518+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58519+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
58520+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
58521+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
58522+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
58523+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
58524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58525+4 4 4 4 4 4
58526+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
58527+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
58528+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
58529+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
58530+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58531+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
58532+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
58533+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
58534+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
58535+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
58536+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
58537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58539+4 4 4 4 4 4
58540+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
58541+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
58542+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
58543+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
58544+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58545+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58546+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58547+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
58548+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
58549+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
58550+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
58551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58553+4 4 4 4 4 4
58554+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
58555+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
58556+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
58557+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
58558+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58559+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
58560+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58561+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
58562+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
58563+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
58564+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58567+4 4 4 4 4 4
58568+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
58569+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
58570+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
58571+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
58572+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58573+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
58574+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
58575+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
58576+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
58577+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
58578+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
58579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58581+4 4 4 4 4 4
58582+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
58583+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
58584+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
58585+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
58586+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58587+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
58588+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
58589+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
58590+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
58591+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
58592+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
58593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58595+4 4 4 4 4 4
58596+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
58597+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
58598+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
58599+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58600+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
58601+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
58602+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
58603+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
58604+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
58605+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
58606+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58609+4 4 4 4 4 4
58610+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
58611+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
58612+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
58613+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58614+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58615+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
58616+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
58617+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
58618+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
58619+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
58620+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58623+4 4 4 4 4 4
58624+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
58625+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
58626+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58627+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58628+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58629+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
58630+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
58631+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
58632+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
58633+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
58634+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58637+4 4 4 4 4 4
58638+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
58639+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
58640+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58641+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58642+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58643+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
58644+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
58645+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
58646+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58647+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58648+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58651+4 4 4 4 4 4
58652+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58653+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
58654+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58655+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
58656+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
58657+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
58658+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
58659+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
58660+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58661+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58662+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58665+4 4 4 4 4 4
58666+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58667+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
58668+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58669+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
58670+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58671+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
58672+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
58673+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
58674+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58675+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58676+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58679+4 4 4 4 4 4
58680+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
58681+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
58682+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58683+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
58684+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
58685+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
58686+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
58687+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
58688+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58689+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58690+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58693+4 4 4 4 4 4
58694+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
58695+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
58696+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58697+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
58698+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
58699+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
58700+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
58701+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
58702+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58703+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58704+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58707+4 4 4 4 4 4
58708+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58709+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
58710+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58711+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
58712+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
58713+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
58714+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
58715+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
58716+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58717+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58718+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58721+4 4 4 4 4 4
58722+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
58723+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
58724+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58725+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
58726+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
58727+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
58728+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
58729+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
58730+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
58731+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58732+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58735+4 4 4 4 4 4
58736+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58737+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
58738+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
58739+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
58740+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
58741+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
58742+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
58743+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
58744+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58745+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58746+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58749+4 4 4 4 4 4
58750+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58751+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
58752+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58753+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
58754+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
58755+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
58756+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
58757+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
58758+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58759+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58760+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58763+4 4 4 4 4 4
58764+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58765+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
58766+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
58767+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
58768+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
58769+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
58770+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58771+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
58772+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58773+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58774+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58777+4 4 4 4 4 4
58778+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58779+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
58780+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
58781+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58782+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
58783+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
58784+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58785+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
58786+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58787+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58788+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58791+4 4 4 4 4 4
58792+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58793+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
58794+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
58795+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
58796+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
58797+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
58798+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
58799+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
58800+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
58801+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58802+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58805+4 4 4 4 4 4
58806+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58807+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
58808+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
58809+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
58810+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
58811+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
58812+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
58813+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
58814+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
58815+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58816+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58819+4 4 4 4 4 4
58820+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
58821+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
58822+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
58823+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
58824+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58825+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
58826+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
58827+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
58828+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
58829+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58830+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58833+4 4 4 4 4 4
58834+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58835+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
58836+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
58837+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
58838+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
58839+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
58840+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
58841+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
58842+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
58843+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58844+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58847+4 4 4 4 4 4
58848+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
58849+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
58850+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
58851+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
58852+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
58853+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
58854+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
58855+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
58856+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
58857+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
58858+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58861+4 4 4 4 4 4
58862+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
58863+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58864+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
58865+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
58866+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
58867+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
58868+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
58869+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
58870+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
58871+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
58872+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58875+4 4 4 4 4 4
58876+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
58877+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58878+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
58879+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
58880+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
58881+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
58882+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58883+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
58884+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
58885+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
58886+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58889+4 4 4 4 4 4
58890+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
58891+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
58892+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
58893+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
58894+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
58895+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
58896+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
58897+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
58898+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
58899+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
58900+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58903+4 4 4 4 4 4
58904+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
58905+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
58906+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58907+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
58908+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
58909+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
58910+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
58911+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
58912+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
58913+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
58914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58917+4 4 4 4 4 4
58918+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58919+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
58920+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
58921+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
58922+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
58923+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
58924+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
58925+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
58926+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
58927+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58931+4 4 4 4 4 4
58932+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
58933+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
58934+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
58935+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
58936+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
58937+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
58938+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
58939+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
58940+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
58941+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58945+4 4 4 4 4 4
58946+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
58947+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
58948+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
58949+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
58950+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
58951+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
58952+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
58953+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
58954+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58955+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58959+4 4 4 4 4 4
58960+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
58961+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58962+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
58963+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58964+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
58965+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
58966+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
58967+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
58968+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
58969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58973+4 4 4 4 4 4
58974+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
58975+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
58976+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
58977+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
58978+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
58979+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
58980+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
58981+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
58982+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
58983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58987+4 4 4 4 4 4
58988+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58989+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
58990+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
58991+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
58992+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
58993+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
58994+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
58995+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
58996+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59001+4 4 4 4 4 4
59002+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
59003+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
59004+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59005+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
59006+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
59007+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
59008+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
59009+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
59010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59015+4 4 4 4 4 4
59016+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59017+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
59018+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
59019+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
59020+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
59021+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
59022+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
59023+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
59024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59029+4 4 4 4 4 4
59030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59031+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
59032+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59033+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
59034+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
59035+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
59036+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
59037+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
59038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59043+4 4 4 4 4 4
59044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59045+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
59046+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
59047+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
59048+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
59049+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
59050+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
59051+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
59052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59057+4 4 4 4 4 4
59058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59059+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
59060+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
59061+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59062+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
59063+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
59064+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
59065+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59071+4 4 4 4 4 4
59072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59074+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59075+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
59076+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
59077+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
59078+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
59079+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59085+4 4 4 4 4 4
59086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59089+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59090+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
59091+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
59092+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
59093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59099+4 4 4 4 4 4
59100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59103+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59104+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59105+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
59106+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
59107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59113+4 4 4 4 4 4
59114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59117+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
59118+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59119+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59120+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
59121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59127+4 4 4 4 4 4
59128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59131+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
59132+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
59133+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
59134+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
59135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59141+4 4 4 4 4 4
59142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59146+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
59147+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59148+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59155+4 4 4 4 4 4
59156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59160+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
59161+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
59162+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
59163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59169+4 4 4 4 4 4
59170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59174+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
59175+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
59176+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59183+4 4 4 4 4 4
59184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59188+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
59189+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
59190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59197+4 4 4 4 4 4
59198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59202+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59203+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
59204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59211+4 4 4 4 4 4
59212diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
59213index 2b8553b..e1a482b 100644
59214--- a/drivers/xen/events/events_base.c
59215+++ b/drivers/xen/events/events_base.c
59216@@ -1564,7 +1564,7 @@ void xen_irq_resume(void)
59217 restore_pirqs();
59218 }
59219
59220-static struct irq_chip xen_dynamic_chip __read_mostly = {
59221+static struct irq_chip xen_dynamic_chip = {
59222 .name = "xen-dyn",
59223
59224 .irq_disable = disable_dynirq,
59225@@ -1578,7 +1578,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
59226 .irq_retrigger = retrigger_dynirq,
59227 };
59228
59229-static struct irq_chip xen_pirq_chip __read_mostly = {
59230+static struct irq_chip xen_pirq_chip = {
59231 .name = "xen-pirq",
59232
59233 .irq_startup = startup_pirq,
59234@@ -1598,7 +1598,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
59235 .irq_retrigger = retrigger_dynirq,
59236 };
59237
59238-static struct irq_chip xen_percpu_chip __read_mostly = {
59239+static struct irq_chip xen_percpu_chip = {
59240 .name = "xen-percpu",
59241
59242 .irq_disable = disable_dynirq,
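Editor's note on the hunk above: grsecurity's constify gcc plugin turns ops-style structures that hold only function pointers into const objects placed in .rodata, and the explicit __read_mostly section annotation would conflict with that placement, so the patch drops it from the three Xen irq_chip definitions. A minimal userspace sketch of the design choice, assuming illustrative struct fields rather than the real irq_chip layout:

    #include <stddef.h>

    struct my_ops {
        void (*start)(void);
        void (*stop)(void);
    };

    /* Before: a writable struct of function pointers. An arbitrary-write
     * primitive could redirect .start to attacker-controlled code. */
    static struct my_ops writable_ops = { NULL, NULL };

    /* After constification: const places the object in .rodata, so the
     * pointers cannot be overwritten at run time. Dropping __read_mostly
     * removes the section annotation that would keep it out of .rodata. */
    static const struct my_ops constified_ops = { NULL, NULL };

    int main(void)
    {
        return (writable_ops.start == constified_ops.start) ? 0 : 1;
    }

Once the object lives in .rodata, overwriting its function pointers at run time faults instead of succeeding.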
59243diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59244index fef20db..d28b1ab 100644
59245--- a/drivers/xen/xenfs/xenstored.c
59246+++ b/drivers/xen/xenfs/xenstored.c
59247@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59248 static int xsd_kva_open(struct inode *inode, struct file *file)
59249 {
59250 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59251+#ifdef CONFIG_GRKERNSEC_HIDESYM
59252+ NULL);
59253+#else
59254 xen_store_interface);
59255+#endif
59256+
59257 if (!file->private_data)
59258 return -ENOMEM;
59259 return 0;
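The xenstored change addresses a kernel-address leak: kasprintf("0x%p", xen_store_interface) writes a real kernel virtual address into a file userspace can read, so under CONFIG_GRKERNSEC_HIDESYM the patch formats NULL instead. A small userspace sketch of the same hide-the-pointer pattern; the HIDESYM macro here is a hypothetical stand-in for the config option:

    #include <stdio.h>

    #define HIDESYM 1   /* hypothetical stand-in for CONFIG_GRKERNSEC_HIDESYM */

    static void report_handle(void *obj)
    {
    #if HIDESYM
        /* Leak nothing: every caller sees the same constant string. */
        printf("handle: 0x%p\n", (void *)NULL);
    #else
        /* Old behaviour: exposes the object's address to userspace. */
        printf("handle: 0x%p\n", obj);
    #endif
    }

    int main(void)
    {
        int object = 0;
        report_handle(&object);
        return 0;
    }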
59260diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
59261index eb14e05..5156de7 100644
59262--- a/fs/9p/vfs_addr.c
59263+++ b/fs/9p/vfs_addr.c
59264@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
59265
59266 retval = v9fs_file_write_internal(inode,
59267 v9inode->writeback_fid,
59268- (__force const char __user *)buffer,
59269+ (const char __force_user *)buffer,
59270 len, &offset, 0);
59271 if (retval > 0)
59272 retval = 0;
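The __force_user cast in the 9p hunk is grsecurity's shorthand for a deliberate kernel-to-user pointer conversion: under its stricter user/kernel pointer separation, the open-coded (__force const char __user *) becomes one named annotation that static checkers can recognize. A sketch of the sparse-style annotation idea, assuming the hypothetical macro definitions shown (the real ones live in the kernel's compiler headers):

    /* Sparse-style annotations; they compile away outside checker runs. */
    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    /* Shorthand for "deliberately cast kernel pointer to user pointer". */
    #define __force_user __force __user

    static long write_user(const char __user *buf, unsigned long len)
    {
        (void)buf;              /* stub: a real write would copy from buf */
        return (long)len;
    }

    static long write_kernel_buffer(const char *kbuf, unsigned long len)
    {
        /* One annotated cast marks the only boundary crossing. */
        return write_user((const char __force_user *)kbuf, len);
    }

    int main(void)
    {
        char kbuf[8] = "data";
        return write_kernel_buffer(kbuf, sizeof(kbuf)) == sizeof(kbuf) ? 0 : 1;
    }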
59273diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
59274index 3662f1d..90558b5 100644
59275--- a/fs/9p/vfs_inode.c
59276+++ b/fs/9p/vfs_inode.c
59277@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59278 void
59279 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
59280 {
59281- char *s = nd_get_link(nd);
59282+ const char *s = nd_get_link(nd);
59283
59284 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
59285 dentry, IS_ERR(s) ? "<error>" : s);
59286diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
59287index 270c481..0d8a962 100644
59288--- a/fs/Kconfig.binfmt
59289+++ b/fs/Kconfig.binfmt
59290@@ -106,7 +106,7 @@ config HAVE_AOUT
59291
59292 config BINFMT_AOUT
59293 tristate "Kernel support for a.out and ECOFF binaries"
59294- depends on HAVE_AOUT
59295+ depends on HAVE_AOUT && BROKEN
59296 ---help---
59297 A.out (Assembler.OUTput) is a set of formats for libraries and
59298 executables used in the earliest versions of UNIX. Linux used
59299diff --git a/fs/afs/inode.c b/fs/afs/inode.c
59300index 8a1d38e..300a14e 100644
59301--- a/fs/afs/inode.c
59302+++ b/fs/afs/inode.c
59303@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59304 struct afs_vnode *vnode;
59305 struct super_block *sb;
59306 struct inode *inode;
59307- static atomic_t afs_autocell_ino;
59308+ static atomic_unchecked_t afs_autocell_ino;
59309
59310 _enter("{%x:%u},%*.*s,",
59311 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
59312@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59313 data.fid.unique = 0;
59314 data.fid.vnode = 0;
59315
59316- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
59317+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
59318 afs_iget5_autocell_test, afs_iget5_set,
59319 &data);
59320 if (!inode) {
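The afs change is a PaX REFCOUNT opt-out: with REFCOUNT enabled, plain atomic_t increments trap on overflow to block reference-count wraparound exploits, but afs_autocell_ino only mints inode numbers, where wrapping is harmless, so it moves to atomic_unchecked_t and atomic_inc_return_unchecked(). A userspace sketch of the distinction, using C11 atomics as stand-ins for the kernel types:

    #include <assert.h>
    #include <stdatomic.h>

    /* Stand-in for atomic_t under PAX_REFCOUNT: overflow is a bug, trap. */
    static int checked_inc_return(atomic_int *v)
    {
        int n = atomic_fetch_add(v, 1) + 1;
        assert(n != 0 && "refcount wrapped");  /* the kernel would trap here */
        return n;
    }

    /* Stand-in for atomic_unchecked_t: wraparound is acceptable. */
    static int unchecked_inc_return(atomic_int *v)
    {
        return atomic_fetch_add(v, 1) + 1;
    }

    int main(void)
    {
        static atomic_int ino_source; /* like afs_autocell_ino: an ID well */
        static atomic_int refcount;   /* real refcounts keep the checked form */

        int ino = unchecked_inc_return(&ino_source);
        checked_inc_return(&refcount);
        return ino == 1 ? 0 : 1;
    }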
59321diff --git a/fs/aio.c b/fs/aio.c
59322index a793f70..46f45af 100644
59323--- a/fs/aio.c
59324+++ b/fs/aio.c
59325@@ -404,7 +404,7 @@ static int aio_setup_ring(struct kioctx *ctx)
59326 size += sizeof(struct io_event) * nr_events;
59327
59328 nr_pages = PFN_UP(size);
59329- if (nr_pages < 0)
59330+ if (nr_pages <= 0)
59331 return -EINVAL;
59332
59333 file = aio_private_file(ctx, nr_pages);
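In the aio hunk, size is derived from a caller-influenced nr_events, so the multiplication can wrap and leave PFN_UP(size) at zero; the old nr_pages < 0 test let that through, while <= 0 rejects the zero-page case as well. A compact sketch, with PFN_UP spelled out and the ring-buffer details elided:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

    static int setup_ring(unsigned long nr_events)
    {
        unsigned long size = sizeof(long) * nr_events;  /* may wrap around */
        long nr_pages = (long)PFN_UP(size);

        /* "< 0" misses a size that wrapped down to zero pages; "<= 0"
         * rejects that degenerate mapping as well. */
        if (nr_pages <= 0)
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", setup_ring(0));   /* 0 bytes -> 0 pages -> rejected */
        return 0;
    }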
59334diff --git a/fs/attr.c b/fs/attr.c
59335index 6530ced..4a827e2 100644
59336--- a/fs/attr.c
59337+++ b/fs/attr.c
59338@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
59339 unsigned long limit;
59340
59341 limit = rlimit(RLIMIT_FSIZE);
59342+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
59343 if (limit != RLIM_INFINITY && offset > limit)
59344 goto out_sig;
59345 if (offset > inode->i_sb->s_maxbytes)
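gr_learn_resource() is grsecurity's learning-mode hook; placed just before the rlimit comparison, it records how much RLIMIT_FSIZE the task actually asked for, so a least-privilege limit can be derived from the logs later. A hedged sketch of the hook-before-check placement; the logging body below is hypothetical, only the call position mirrors the patch:

    #include <stdio.h>

    #define RLIMIT_FSIZE  1
    #define RLIM_INFINITY (~0UL)

    /* Hypothetical learning hook: record "this task wanted this much". */
    static void learn_resource(int res, unsigned long wanted)
    {
        fprintf(stderr, "learn: rlimit %d needs at least %lu\n", res, wanted);
    }

    static int newsize_ok(unsigned long limit, unsigned long offset)
    {
        /* The hook fires before the check, so even allowed requests are
         * recorded and a tight per-subject limit can be suggested later. */
        learn_resource(RLIMIT_FSIZE, offset);
        if (limit != RLIM_INFINITY && offset > limit)
            return -1;  /* the kernel path raises SIGXFSZ here */
        return 0;
    }

    int main(void) { return newsize_ok(1024, 4096) ? 1 : 0; }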
59346diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
59347index 116fd38..c04182da 100644
59348--- a/fs/autofs4/waitq.c
59349+++ b/fs/autofs4/waitq.c
59350@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
59351 {
59352 unsigned long sigpipe, flags;
59353 mm_segment_t fs;
59354- const char *data = (const char *)addr;
59355+ const char __user *data = (const char __force_user *)addr;
59356 ssize_t wr = 0;
59357
59358 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
59359@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
59360 return 1;
59361 }
59362
59363+#ifdef CONFIG_GRKERNSEC_HIDESYM
59364+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
59365+#endif
59366+
59367 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59368 enum autofs_notify notify)
59369 {
59370@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59371
59372 /* If this is a direct mount request create a dummy name */
59373 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
59374+#ifdef CONFIG_GRKERNSEC_HIDESYM
59375+ /* this name does get written to userland via autofs4_write() */
59376+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
59377+#else
59378 qstr.len = sprintf(name, "%p", dentry);
59379+#endif
59380 else {
59381 qstr.len = autofs4_getpath(sbi, dentry, &name);
59382 if (!qstr.len) {
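The autofs4 hunk closes another pointer leak: the dummy wait-queue name was sprintf(name, "%p", dentry), and that name reaches userspace through autofs4_write(), so under HIDESYM the patch substitutes a monotonically increasing counter, which stays unique per request without exposing an address. A userspace sketch of the pattern (HIDESYM is again a stand-in macro, defined here so the non-leaking path is the one compiled in):

    #include <stdio.h>
    #include <stdatomic.h>

    #define HIDESYM 1   /* stand-in: compile the non-leaking path */

    static atomic_uint dummy_name_id;   /* like autofs_dummy_name_id */

    /* Build a unique request name without exposing a kernel address. */
    static int make_dummy_name(char *name, void *dentry)
    {
    #if HIDESYM
        (void)dentry;
        return sprintf(name, "%08x",
                       atomic_fetch_add(&dummy_name_id, 1u) + 1u);
    #else
        /* Old behaviour: the object's address itself became the name. */
        return sprintf(name, "%p", dentry);
    #endif
    }

    int main(void)
    {
        char name[32];
        int obj = 0;
        int len = make_dummy_name(name, &obj);
        printf("%.*s\n", len, name);
        return 0;
    }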
59383diff --git a/fs/befs/endian.h b/fs/befs/endian.h
59384index 2722387..56059b5 100644
59385--- a/fs/befs/endian.h
59386+++ b/fs/befs/endian.h
59387@@ -11,7 +11,7 @@
59388
59389 #include <asm/byteorder.h>
59390
59391-static inline u64
59392+static inline u64 __intentional_overflow(-1)
59393 fs64_to_cpu(const struct super_block *sb, fs64 n)
59394 {
59395 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59396@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
59397 return (__force fs64)cpu_to_be64(n);
59398 }
59399
59400-static inline u32
59401+static inline u32 __intentional_overflow(-1)
59402 fs32_to_cpu(const struct super_block *sb, fs32 n)
59403 {
59404 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59405@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
59406 return (__force fs32)cpu_to_be32(n);
59407 }
59408
59409-static inline u16
59410+static inline u16 __intentional_overflow(-1)
59411 fs16_to_cpu(const struct super_block *sb, fs16 n)
59412 {
59413 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
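__intentional_overflow(-1) whitelists a function for grsecurity's size_overflow gcc plugin, which otherwise instruments integer expressions that feed allocation sizes and traps on overflow; byte-order conversion helpers like these shuffle bits legitimately and would only trigger false positives. A sketch of the annotation shape; the macro definition below is a hypothetical stand-in so the example compiles without the plugin:

    /* Hypothetical stand-in: expands to the plugin attribute when the
     * size_overflow plugin is active, to nothing otherwise. */
    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif

    #include <stdint.h>

    /* Whitelisted: a byte swap is pure bit shuffling, and overflow
     * tracking here would only generate false positives. */
    static inline uint32_t __intentional_overflow(-1)
    bswap32(uint32_t n)
    {
        return (n >> 24) | ((n >> 8) & 0x0000ff00u)
             | ((n << 8) & 0x00ff0000u) | (n << 24);
    }

    int main(void) { return bswap32(0x11223344u) == 0x44332211u ? 0 : 1; }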
59414diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
59415index 4c55668..eeae150 100644
59416--- a/fs/binfmt_aout.c
59417+++ b/fs/binfmt_aout.c
59418@@ -16,6 +16,7 @@
59419 #include <linux/string.h>
59420 #include <linux/fs.h>
59421 #include <linux/file.h>
59422+#include <linux/security.h>
59423 #include <linux/stat.h>
59424 #include <linux/fcntl.h>
59425 #include <linux/ptrace.h>
59426@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
59427 #endif
59428 # define START_STACK(u) ((void __user *)u.start_stack)
59429
59430+ memset(&dump, 0, sizeof(dump));
59431+
59432 fs = get_fs();
59433 set_fs(KERNEL_DS);
59434 has_dumped = 1;
59435@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
59436
59437 /* If the size of the dump file exceeds the rlimit, then see what would happen
59438 if we wrote the stack, but not the data area. */
59439+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
59440 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
59441 dump.u_dsize = 0;
59442
59443 /* Make sure we have enough room to write the stack and data areas. */
59444+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
59445 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
59446 dump.u_ssize = 0;
59447
59448@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
59449 rlim = rlimit(RLIMIT_DATA);
59450 if (rlim >= RLIM_INFINITY)
59451 rlim = ~0;
59452+
59453+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
59454 if (ex.a_data + ex.a_bss > rlim)
59455 return -ENOMEM;
59456
59457@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
59458
59459 install_exec_creds(bprm);
59460
59461+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59462+ current->mm->pax_flags = 0UL;
59463+#endif
59464+
59465+#ifdef CONFIG_PAX_PAGEEXEC
59466+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
59467+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
59468+
59469+#ifdef CONFIG_PAX_EMUTRAMP
59470+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
59471+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
59472+#endif
59473+
59474+#ifdef CONFIG_PAX_MPROTECT
59475+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
59476+ current->mm->pax_flags |= MF_PAX_MPROTECT;
59477+#endif
59478+
59479+ }
59480+#endif
59481+
59482 if (N_MAGIC(ex) == OMAGIC) {
59483 unsigned long text_addr, map_size;
59484 loff_t pos;
59485@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
59486 return error;
59487
59488 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
59489- PROT_READ | PROT_WRITE | PROT_EXEC,
59490+ PROT_READ | PROT_WRITE,
59491 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
59492 fd_offset + ex.a_text);
59493 if (error != N_DATADDR(ex))
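The a.out loader hunk bundles several hardening steps: the on-stack dump struct is zeroed so uninitialized kernel stack bytes cannot reach a core file, the RLIMIT_CORE and RLIMIT_DATA checks gain gr_learn_resource() hooks, the task's PaX flags are derived from N_FLAGS() bits in the binary header, and PROT_EXEC is dropped from the data-segment mapping. A sketch of the flag derivation in isolation; the bit values are hypothetical, while the opt-out logic mirrors the patch:

    #include <stdio.h>

    /* Hypothetical bit values; the real ones live in the a.out headers. */
    #define F_PAX_PAGEEXEC  0x01u  /* header bit: PAGEEXEC disabled if set */
    #define F_PAX_MPROTECT  0x02u  /* header bit: MPROTECT disabled if set */
    #define MF_PAX_PAGEEXEC 0x01UL
    #define MF_PAX_MPROTECT 0x02UL

    /* Opt-out model: protections are on unless the binary's header
     * explicitly flags them off, mirroring load_aout_binary(). */
    static unsigned long pax_flags_from_header(unsigned int n_flags)
    {
        unsigned long pax_flags = 0UL;

        if (!(n_flags & F_PAX_PAGEEXEC))
            pax_flags |= MF_PAX_PAGEEXEC;
        if (!(n_flags & F_PAX_MPROTECT))
            pax_flags |= MF_PAX_MPROTECT;
        return pax_flags;
    }

    int main(void)
    {
        printf("flags=%#lx\n", pax_flags_from_header(0));  /* both on */
        return 0;
    }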
59494diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
59495index d925f55..d31f527 100644
59496--- a/fs/binfmt_elf.c
59497+++ b/fs/binfmt_elf.c
59498@@ -34,6 +34,7 @@
59499 #include <linux/utsname.h>
59500 #include <linux/coredump.h>
59501 #include <linux/sched.h>
59502+#include <linux/xattr.h>
59503 #include <asm/uaccess.h>
59504 #include <asm/param.h>
59505 #include <asm/page.h>
59506@@ -47,7 +48,7 @@
59507
59508 static int load_elf_binary(struct linux_binprm *bprm);
59509 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
59510- int, int, unsigned long);
59511+ int, int, unsigned long) __intentional_overflow(-1);
59512
59513 #ifdef CONFIG_USELIB
59514 static int load_elf_library(struct file *);
59515@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
59516 #define elf_core_dump NULL
59517 #endif
59518
59519+#ifdef CONFIG_PAX_MPROTECT
59520+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
59521+#endif
59522+
59523+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59524+static void elf_handle_mmap(struct file *file);
59525+#endif
59526+
59527 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
59528 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
59529 #else
59530@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
59531 .load_binary = load_elf_binary,
59532 .load_shlib = load_elf_library,
59533 .core_dump = elf_core_dump,
59534+
59535+#ifdef CONFIG_PAX_MPROTECT
59536+ .handle_mprotect= elf_handle_mprotect,
59537+#endif
59538+
59539+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59540+ .handle_mmap = elf_handle_mmap,
59541+#endif
59542+
59543 .min_coredump = ELF_EXEC_PAGESIZE,
59544 };
59545
59546@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
59547
59548 static int set_brk(unsigned long start, unsigned long end)
59549 {
59550+ unsigned long e = end;
59551+
59552 start = ELF_PAGEALIGN(start);
59553 end = ELF_PAGEALIGN(end);
59554 if (end > start) {
59555@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
59556 if (BAD_ADDR(addr))
59557 return addr;
59558 }
59559- current->mm->start_brk = current->mm->brk = end;
59560+ current->mm->start_brk = current->mm->brk = e;
59561 return 0;
59562 }
59563
59564@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59565 elf_addr_t __user *u_rand_bytes;
59566 const char *k_platform = ELF_PLATFORM;
59567 const char *k_base_platform = ELF_BASE_PLATFORM;
59568- unsigned char k_rand_bytes[16];
59569+ u32 k_rand_bytes[4];
59570 int items;
59571 elf_addr_t *elf_info;
59572 int ei_index = 0;
59573 const struct cred *cred = current_cred();
59574 struct vm_area_struct *vma;
59575+ unsigned long saved_auxv[AT_VECTOR_SIZE];
59576
59577 /*
59578 * In some cases (e.g. Hyper-Threading), we want to avoid L1
59579@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59580 * Generate 16 random bytes for userspace PRNG seeding.
59581 */
59582 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
59583- u_rand_bytes = (elf_addr_t __user *)
59584- STACK_ALLOC(p, sizeof(k_rand_bytes));
59585+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
59586+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
59587+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
59588+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
59589+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
59590+ u_rand_bytes = (elf_addr_t __user *) p;
59591 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
59592 return -EFAULT;
59593
59594@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59595 return -EFAULT;
59596 current->mm->env_end = p;
59597
59598+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
59599+
59600 /* Put the elf_info on the stack in the right place. */
59601 sp = (elf_addr_t __user *)envp + 1;
59602- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
59603+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
59604 return -EFAULT;
59605 return 0;
59606 }
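The bounce through the on-stack saved_auxv[] is most plausibly a PAX_USERCOPY accommodation: elf_info points into current->mm->saved_auxv, i.e. into the mm_struct slab object, and copying straight from a non-whitelisted slab object to userspace would trip the hardened usercopy checks (rationale inferred from the rest of the patch). The general pattern, with placeholder names:

	/* sketch: bounce slab-resident data through the stack before copy_to_user();
	 * slab_resident_ptr, uptr and n are placeholders */
	elf_addr_t tmp[AT_VECTOR_SIZE];

	memcpy(tmp, slab_resident_ptr, n * sizeof(elf_addr_t));
	if (copy_to_user(uptr, tmp, n * sizeof(elf_addr_t)))
		return -EFAULT;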
59607@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
59608 an ELF header */
59609
59610 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59611- struct file *interpreter, unsigned long *interp_map_addr,
59612+ struct file *interpreter,
59613 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
59614 {
59615 struct elf_phdr *eppnt;
59616- unsigned long load_addr = 0;
59617+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
59618 int load_addr_set = 0;
59619 unsigned long last_bss = 0, elf_bss = 0;
59620- unsigned long error = ~0UL;
59621+ unsigned long error = -EINVAL;
59622 unsigned long total_size;
59623 int i;
59624
59625@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59626 goto out;
59627 }
59628
59629+#ifdef CONFIG_PAX_SEGMEXEC
59630+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
59631+ pax_task_size = SEGMEXEC_TASK_SIZE;
59632+#endif
59633+
59634 eppnt = interp_elf_phdata;
59635 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
59636 if (eppnt->p_type == PT_LOAD) {
59637@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59638 map_addr = elf_map(interpreter, load_addr + vaddr,
59639 eppnt, elf_prot, elf_type, total_size);
59640 total_size = 0;
59641- if (!*interp_map_addr)
59642- *interp_map_addr = map_addr;
59643 error = map_addr;
59644 if (BAD_ADDR(map_addr))
59645 goto out;
59646@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59647 k = load_addr + eppnt->p_vaddr;
59648 if (BAD_ADDR(k) ||
59649 eppnt->p_filesz > eppnt->p_memsz ||
59650- eppnt->p_memsz > TASK_SIZE ||
59651- TASK_SIZE - eppnt->p_memsz < k) {
59652+ eppnt->p_memsz > pax_task_size ||
59653+ pax_task_size - eppnt->p_memsz < k) {
59654 error = -ENOMEM;
59655 goto out;
59656 }
59657@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59658 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
59659
59660 /* Map the last of the bss segment */
59661- error = vm_brk(elf_bss, last_bss - elf_bss);
59662- if (BAD_ADDR(error))
59663- goto out;
59664+ if (last_bss > elf_bss) {
59665+ error = vm_brk(elf_bss, last_bss - elf_bss);
59666+ if (BAD_ADDR(error))
59667+ goto out;
59668+ }
59669 }
59670
59671 error = load_addr;
59672@@ -634,6 +666,336 @@ out:
59673 return error;
59674 }
59675
59676+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59677+#ifdef CONFIG_PAX_SOFTMODE
59678+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
59679+{
59680+ unsigned long pax_flags = 0UL;
59681+
59682+#ifdef CONFIG_PAX_PAGEEXEC
59683+ if (elf_phdata->p_flags & PF_PAGEEXEC)
59684+ pax_flags |= MF_PAX_PAGEEXEC;
59685+#endif
59686+
59687+#ifdef CONFIG_PAX_SEGMEXEC
59688+ if (elf_phdata->p_flags & PF_SEGMEXEC)
59689+ pax_flags |= MF_PAX_SEGMEXEC;
59690+#endif
59691+
59692+#ifdef CONFIG_PAX_EMUTRAMP
59693+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59694+ pax_flags |= MF_PAX_EMUTRAMP;
59695+#endif
59696+
59697+#ifdef CONFIG_PAX_MPROTECT
59698+ if (elf_phdata->p_flags & PF_MPROTECT)
59699+ pax_flags |= MF_PAX_MPROTECT;
59700+#endif
59701+
59702+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59703+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
59704+ pax_flags |= MF_PAX_RANDMMAP;
59705+#endif
59706+
59707+ return pax_flags;
59708+}
59709+#endif
59710+
59711+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
59712+{
59713+ unsigned long pax_flags = 0UL;
59714+
59715+#ifdef CONFIG_PAX_PAGEEXEC
59716+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
59717+ pax_flags |= MF_PAX_PAGEEXEC;
59718+#endif
59719+
59720+#ifdef CONFIG_PAX_SEGMEXEC
59721+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
59722+ pax_flags |= MF_PAX_SEGMEXEC;
59723+#endif
59724+
59725+#ifdef CONFIG_PAX_EMUTRAMP
59726+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
59727+ pax_flags |= MF_PAX_EMUTRAMP;
59728+#endif
59729+
59730+#ifdef CONFIG_PAX_MPROTECT
59731+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
59732+ pax_flags |= MF_PAX_MPROTECT;
59733+#endif
59734+
59735+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59736+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
59737+ pax_flags |= MF_PAX_RANDMMAP;
59738+#endif
59739+
59740+ return pax_flags;
59741+}
59742+#endif
59743+
59744+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59745+#ifdef CONFIG_PAX_SOFTMODE
59746+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
59747+{
59748+ unsigned long pax_flags = 0UL;
59749+
59750+#ifdef CONFIG_PAX_PAGEEXEC
59751+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
59752+ pax_flags |= MF_PAX_PAGEEXEC;
59753+#endif
59754+
59755+#ifdef CONFIG_PAX_SEGMEXEC
59756+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
59757+ pax_flags |= MF_PAX_SEGMEXEC;
59758+#endif
59759+
59760+#ifdef CONFIG_PAX_EMUTRAMP
59761+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59762+ pax_flags |= MF_PAX_EMUTRAMP;
59763+#endif
59764+
59765+#ifdef CONFIG_PAX_MPROTECT
59766+ if (pax_flags_softmode & MF_PAX_MPROTECT)
59767+ pax_flags |= MF_PAX_MPROTECT;
59768+#endif
59769+
59770+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59771+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
59772+ pax_flags |= MF_PAX_RANDMMAP;
59773+#endif
59774+
59775+ return pax_flags;
59776+}
59777+#endif
59778+
59779+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
59780+{
59781+ unsigned long pax_flags = 0UL;
59782+
59783+#ifdef CONFIG_PAX_PAGEEXEC
59784+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
59785+ pax_flags |= MF_PAX_PAGEEXEC;
59786+#endif
59787+
59788+#ifdef CONFIG_PAX_SEGMEXEC
59789+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
59790+ pax_flags |= MF_PAX_SEGMEXEC;
59791+#endif
59792+
59793+#ifdef CONFIG_PAX_EMUTRAMP
59794+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
59795+ pax_flags |= MF_PAX_EMUTRAMP;
59796+#endif
59797+
59798+#ifdef CONFIG_PAX_MPROTECT
59799+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
59800+ pax_flags |= MF_PAX_MPROTECT;
59801+#endif
59802+
59803+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59804+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
59805+ pax_flags |= MF_PAX_RANDMMAP;
59806+#endif
59807+
59808+ return pax_flags;
59809+}
59810+#endif
59811+
59812+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59813+static unsigned long pax_parse_defaults(void)
59814+{
59815+ unsigned long pax_flags = 0UL;
59816+
59817+#ifdef CONFIG_PAX_SOFTMODE
59818+ if (pax_softmode)
59819+ return pax_flags;
59820+#endif
59821+
59822+#ifdef CONFIG_PAX_PAGEEXEC
59823+ pax_flags |= MF_PAX_PAGEEXEC;
59824+#endif
59825+
59826+#ifdef CONFIG_PAX_SEGMEXEC
59827+ pax_flags |= MF_PAX_SEGMEXEC;
59828+#endif
59829+
59830+#ifdef CONFIG_PAX_MPROTECT
59831+ pax_flags |= MF_PAX_MPROTECT;
59832+#endif
59833+
59834+#ifdef CONFIG_PAX_RANDMMAP
59835+ if (randomize_va_space)
59836+ pax_flags |= MF_PAX_RANDMMAP;
59837+#endif
59838+
59839+ return pax_flags;
59840+}
59841+
59842+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
59843+{
59844+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
59845+
59846+#ifdef CONFIG_PAX_EI_PAX
59847+
59848+#ifdef CONFIG_PAX_SOFTMODE
59849+ if (pax_softmode)
59850+ return pax_flags;
59851+#endif
59852+
59853+ pax_flags = 0UL;
59854+
59855+#ifdef CONFIG_PAX_PAGEEXEC
59856+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
59857+ pax_flags |= MF_PAX_PAGEEXEC;
59858+#endif
59859+
59860+#ifdef CONFIG_PAX_SEGMEXEC
59861+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
59862+ pax_flags |= MF_PAX_SEGMEXEC;
59863+#endif
59864+
59865+#ifdef CONFIG_PAX_EMUTRAMP
59866+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
59867+ pax_flags |= MF_PAX_EMUTRAMP;
59868+#endif
59869+
59870+#ifdef CONFIG_PAX_MPROTECT
59871+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
59872+ pax_flags |= MF_PAX_MPROTECT;
59873+#endif
59874+
59875+#ifdef CONFIG_PAX_ASLR
59876+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
59877+ pax_flags |= MF_PAX_RANDMMAP;
59878+#endif
59879+
59880+#endif
59881+
59882+ return pax_flags;
59883+
59884+}
59885+
59886+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
59887+{
59888+
59889+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59890+ unsigned long i;
59891+
59892+ for (i = 0UL; i < elf_ex->e_phnum; i++)
59893+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
59894+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
59895+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
59896+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
59897+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
59898+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59899+ return PAX_PARSE_FLAGS_FALLBACK;
59900+
59901+#ifdef CONFIG_PAX_SOFTMODE
59902+ if (pax_softmode)
59903+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59904+ else
59905+#endif
59906+
59907+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59908+ break;
59909+ }
59910+#endif
59911+
59912+ return PAX_PARSE_FLAGS_FALLBACK;
59913+}
59914+
59915+static unsigned long pax_parse_xattr_pax(struct file * const file)
59916+{
59917+
59918+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59919+ ssize_t xattr_size, i;
59920+ unsigned char xattr_value[sizeof("pemrs") - 1];
59921+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59922+
59923+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59924+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59925+ return PAX_PARSE_FLAGS_FALLBACK;
59926+
59927+ for (i = 0; i < xattr_size; i++)
59928+ switch (xattr_value[i]) {
59929+ default:
59930+ return PAX_PARSE_FLAGS_FALLBACK;
59931+
59932+#define parse_flag(option1, option2, flag) \
59933+ case option1: \
59934+ if (pax_flags_hardmode & MF_PAX_##flag) \
59935+ return PAX_PARSE_FLAGS_FALLBACK;\
59936+ pax_flags_hardmode |= MF_PAX_##flag; \
59937+ break; \
59938+ case option2: \
59939+ if (pax_flags_softmode & MF_PAX_##flag) \
59940+ return PAX_PARSE_FLAGS_FALLBACK;\
59941+ pax_flags_softmode |= MF_PAX_##flag; \
59942+ break;
59943+
59944+ parse_flag('p', 'P', PAGEEXEC);
59945+ parse_flag('e', 'E', EMUTRAMP);
59946+ parse_flag('m', 'M', MPROTECT);
59947+ parse_flag('r', 'R', RANDMMAP);
59948+ parse_flag('s', 'S', SEGMEXEC);
59949+
59950+#undef parse_flag
59951+ }
59952+
59953+ if (pax_flags_hardmode & pax_flags_softmode)
59954+ return PAX_PARSE_FLAGS_FALLBACK;
59955+
59956+#ifdef CONFIG_PAX_SOFTMODE
59957+ if (pax_softmode)
59958+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59959+ else
59960+#endif
59961+
59962+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59963+#else
59964+ return PAX_PARSE_FLAGS_FALLBACK;
59965+#endif
59966+
59967+}
59968+
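In this encoding a lower-case letter hard-disables a feature and the matching upper-case letter soft-enables it; naming both cases of one feature, or any unknown character, falls back to the defaults. Userspace normally writes the marking with setfattr or paxctl-ng; a minimal C equivalent, assuming the "user.pax.flags" xattr name used by PaX userland:

	/* hypothetical helper: hard-disable MPROTECT for one binary */
	#include <stdio.h>
	#include <sys/xattr.h>

	int main(int argc, char **argv)
	{
		if (argc < 2)
			return 1;
		if (setxattr(argv[1], "user.pax.flags", "m", 1, 0) != 0) {
			perror("setxattr");
			return 1;
		}
		return 0;
	}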
59969+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
59970+{
59971+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
59972+
59973+ pax_flags = pax_parse_defaults();
59974+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
59975+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
59976+ xattr_pax_flags = pax_parse_xattr_pax(file);
59977+
59978+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59979+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59980+ pt_pax_flags != xattr_pax_flags)
59981+ return -EINVAL;
59982+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59983+ pax_flags = xattr_pax_flags;
59984+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59985+ pax_flags = pt_pax_flags;
59986+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59987+ pax_flags = ei_pax_flags;
59988+
59989+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
59990+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59991+ if ((__supported_pte_mask & _PAGE_NX))
59992+ pax_flags &= ~MF_PAX_SEGMEXEC;
59993+ else
59994+ pax_flags &= ~MF_PAX_PAGEEXEC;
59995+ }
59996+#endif
59997+
59998+ if (0 > pax_check_flags(&pax_flags))
59999+ return -EINVAL;
60000+
60001+ current->mm->pax_flags = pax_flags;
60002+ return 0;
60003+}
60004+#endif
60005+
60006 /*
60007 * These are the functions used to load ELF style executables and shared
60008 * libraries. There is no binary dependent code anywhere else.
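pax_parse_pax_flags() above resolves the four flag sources with a fixed precedence: a filesystem xattr beats PT_PAX_FLAGS, which beats the legacy EI_PAX header bits, which beat the compiled-in defaults, and disagreement between the two explicit markings (xattr vs. program header) aborts the exec. Condensed into a standalone sketch:

	/* minimal model of the precedence above; FALLBACK plays the role of
	 * PAX_PARSE_FLAGS_FALLBACK ("this source said nothing") */
	#define FALLBACK (~0UL)

	static unsigned long resolve_pax_flags(unsigned long def, unsigned long ei,
					       unsigned long pt, unsigned long xa,
					       int *err)
	{
		*err = 0;
		if (pt != FALLBACK && xa != FALLBACK && pt != xa) {
			*err = -22;	/* -EINVAL: conflicting explicit markings */
			return FALLBACK;
		}
		if (xa != FALLBACK)
			return xa;
		if (pt != FALLBACK)
			return pt;
		if (ei != FALLBACK)
			return ei;
		return def;
	}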
60009@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
60010 {
60011 unsigned long random_variable = 0;
60012
60013+#ifdef CONFIG_PAX_RANDUSTACK
60014+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
60015+ return stack_top - current->mm->delta_stack;
60016+#endif
60017+
60018 if ((current->flags & PF_RANDOMIZE) &&
60019 !(current->personality & ADDR_NO_RANDOMIZE)) {
60020 random_variable = (unsigned long) get_random_int();
60021@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60022 unsigned long load_addr = 0, load_bias = 0;
60023 int load_addr_set = 0;
60024 char * elf_interpreter = NULL;
60025- unsigned long error;
60026+ unsigned long error = 0;
60027 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
60028 unsigned long elf_bss, elf_brk;
60029 int retval, i;
60030@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
60031 struct elfhdr interp_elf_ex;
60032 } *loc;
60033 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
60034+ unsigned long pax_task_size;
60035
60036 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
60037 if (!loc) {
60038@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
60039 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
60040 may depend on the personality. */
60041 SET_PERSONALITY2(loc->elf_ex, &arch_state);
60042+
60043+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60044+ current->mm->pax_flags = 0UL;
60045+#endif
60046+
60047+#ifdef CONFIG_PAX_DLRESOLVE
60048+ current->mm->call_dl_resolve = 0UL;
60049+#endif
60050+
60051+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60052+ current->mm->call_syscall = 0UL;
60053+#endif
60054+
60055+#ifdef CONFIG_PAX_ASLR
60056+ current->mm->delta_mmap = 0UL;
60057+ current->mm->delta_stack = 0UL;
60058+#endif
60059+
60060+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60061+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
60062+ send_sig(SIGKILL, current, 0);
60063+ goto out_free_dentry;
60064+ }
60065+#endif
60066+
60067+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60068+ pax_set_initial_flags(bprm);
60069+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60070+ if (pax_set_initial_flags_func)
60071+ (pax_set_initial_flags_func)(bprm);
60072+#endif
60073+
60074+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60075+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
60076+ current->mm->context.user_cs_limit = PAGE_SIZE;
60077+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
60078+ }
60079+#endif
60080+
60081+#ifdef CONFIG_PAX_SEGMEXEC
60082+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
60083+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
60084+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
60085+ pax_task_size = SEGMEXEC_TASK_SIZE;
60086+ current->mm->def_flags |= VM_NOHUGEPAGE;
60087+ } else
60088+#endif
60089+
60090+ pax_task_size = TASK_SIZE;
60091+
60092+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
60093+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60094+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
60095+ put_cpu();
60096+ }
60097+#endif
60098+
60099+#ifdef CONFIG_PAX_ASLR
60100+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60101+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
60102+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
60103+ }
60104+#endif
60105+
60106+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60107+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60108+ executable_stack = EXSTACK_DISABLE_X;
60109+ current->personality &= ~READ_IMPLIES_EXEC;
60110+ } else
60111+#endif
60112+
60113 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
60114 current->personality |= READ_IMPLIES_EXEC;
60115
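Both deltas are drawn once per exec: a PAX_DELTA_*_LEN-bit random value shifted into page units. As a worked example, assuming PAX_DELTA_MMAP_LEN == 16 and 4 KiB pages:

	delta_mmap = (rand & ((1UL << 16) - 1)) << 12
	           = up to 0xffff pages = ~2^28 bytes ≈ 256 MiB of mmap-base jitter

delta_stack is computed the same way with PAX_DELTA_STACK_LEN.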
60116@@ -925,12 +1364,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
60117 #else
60118 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
60119 #endif
60120- total_size = total_mapping_size(elf_phdata,
60121- loc->elf_ex.e_phnum);
60122- if (!total_size) {
60123- error = -EINVAL;
60124- goto out_free_dentry;
60125+
60126+#ifdef CONFIG_PAX_RANDMMAP
60127+ /* PaX: randomize base address at the default exe base if requested */
60128+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
60129+#ifdef CONFIG_SPARC64
60130+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
60131+#else
60132+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
60133+#endif
60134+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
60135+ elf_flags |= MAP_FIXED;
60136 }
60137+#endif
60138+
60139+ total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum);
60140 }
60141
60142 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
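For PIE binaries loaded with an interpreter, RANDMMAP thus pins the main executable at a randomized, MAP_FIXED offset above PaX's own base rather than relying on the stock ELF_ET_DYN_BASE:

	load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr
	                          + ((rand & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT))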
60143@@ -962,9 +1410,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
60144 * allowed task size. Note that p_filesz must always be
60145 * <= p_memsz so it is only necessary to check p_memsz.
60146 */
60147- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60148- elf_ppnt->p_memsz > TASK_SIZE ||
60149- TASK_SIZE - elf_ppnt->p_memsz < k) {
60150+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
60151+ elf_ppnt->p_memsz > pax_task_size ||
60152+ pax_task_size - elf_ppnt->p_memsz < k) {
60153 /* set_brk can never work. Avoid overflows. */
60154 retval = -EINVAL;
60155 goto out_free_dentry;
60156@@ -1000,16 +1448,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
60157 if (retval)
60158 goto out_free_dentry;
60159 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
60160- retval = -EFAULT; /* Nobody gets to see this, but.. */
60161- goto out_free_dentry;
60162+ /*
60163+ * This bss-zeroing can fail if the ELF
60164+ * file specifies odd protections. So
60165+ * we don't check the return value
60166+ */
60167 }
60168
60169+#ifdef CONFIG_PAX_RANDMMAP
60170+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60171+ unsigned long start, size, flags;
60172+ vm_flags_t vm_flags;
60173+
60174+ start = ELF_PAGEALIGN(elf_brk);
60175+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
60176+ flags = MAP_FIXED | MAP_PRIVATE;
60177+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
60178+
60179+ down_write(&current->mm->mmap_sem);
60180+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
60181+ retval = -ENOMEM;
60182+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
60183+// if (current->personality & ADDR_NO_RANDOMIZE)
60184+// vm_flags |= VM_READ | VM_MAYREAD;
60185+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
60186+ retval = IS_ERR_VALUE(start) ? start : 0;
60187+ }
60188+ up_write(&current->mm->mmap_sem);
60189+ if (retval == 0)
60190+ retval = set_brk(start + size, start + size + PAGE_SIZE);
60191+ if (retval < 0)
60192+ goto out_free_dentry;
60193+ }
60194+#endif
60195+
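The randomized guard region inserted above the executable's brk is worth quantifying:

	size    = PAGE_SIZE + ((rand & ((1UL << 22) - 1)) << 4)
	max gap = (2^22 - 1) << 4 ≈ 2^26 bytes = 64 MiB, in 16-byte steps

so the heap starts somewhere between one page and roughly 64 MiB past the ELF image, with the reserved page at the top of the gap becoming the new brk base.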
60196 if (elf_interpreter) {
60197- unsigned long interp_map_addr = 0;
60198-
60199 elf_entry = load_elf_interp(&loc->interp_elf_ex,
60200 interpreter,
60201- &interp_map_addr,
60202 load_bias, interp_elf_phdata);
60203 if (!IS_ERR((void *)elf_entry)) {
60204 /*
60205@@ -1237,7 +1712,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
60206 * Decide what to dump of a segment, part, all or none.
60207 */
60208 static unsigned long vma_dump_size(struct vm_area_struct *vma,
60209- unsigned long mm_flags)
60210+ unsigned long mm_flags, long signr)
60211 {
60212 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
60213
60214@@ -1275,7 +1750,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
60215 if (vma->vm_file == NULL)
60216 return 0;
60217
60218- if (FILTER(MAPPED_PRIVATE))
60219+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
60220 goto whole;
60221
60222 /*
60223@@ -1482,9 +1957,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
60224 {
60225 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
60226 int i = 0;
60227- do
60228+ do {
60229 i += 2;
60230- while (auxv[i - 2] != AT_NULL);
60231+ } while (auxv[i - 2] != AT_NULL);
60232 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
60233 }
60234
60235@@ -1493,7 +1968,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
60236 {
60237 mm_segment_t old_fs = get_fs();
60238 set_fs(KERNEL_DS);
60239- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
60240+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
60241 set_fs(old_fs);
60242 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
60243 }
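Within the set_fs(KERNEL_DS) window the "user" destination is really a kernel buffer, so the cast is re-annotated with __force_user to satisfy sparse and PaX's checker plugin about the address-space crossing. Its likely shape when checking is disabled (an assumption; the real definition is in the compiler headers):

	#ifdef __CHECKER__
	# define __force_user	__force __user
	#else
	# define __force_user
	#endif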
60244@@ -2213,7 +2688,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60245 vma = next_vma(vma, gate_vma)) {
60246 unsigned long dump_size;
60247
60248- dump_size = vma_dump_size(vma, cprm->mm_flags);
60249+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60250 vma_filesz[i++] = dump_size;
60251 vma_data_size += dump_size;
60252 }
60253@@ -2321,6 +2796,167 @@ out:
60254
60255 #endif /* CONFIG_ELF_CORE */
60256
60257+#ifdef CONFIG_PAX_MPROTECT
60258+/* PaX: non-PIC ELF libraries need relocations on their executable segments;
60259+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
60260+ * we'll remove VM_MAYWRITE for good on RELRO segments.
60261+ *
60262+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
60263+ * basis because we want to allow the common case and not the special ones.
60264+ */
60265+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
60266+{
60267+ struct elfhdr elf_h;
60268+ struct elf_phdr elf_p;
60269+ unsigned long i;
60270+ unsigned long oldflags;
60271+ bool is_textrel_rw, is_textrel_rx, is_relro;
60272+
60273+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
60274+ return;
60275+
60276+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
60277+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
60278+
60279+#ifdef CONFIG_PAX_ELFRELOCS
60280+ /* possible TEXTREL */
60281+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
60282+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
60283+#else
60284+ is_textrel_rw = false;
60285+ is_textrel_rx = false;
60286+#endif
60287+
60288+ /* possible RELRO */
60289+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
60290+
60291+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
60292+ return;
60293+
60294+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60295+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60296+
60297+#ifdef CONFIG_PAX_ETEXECRELOCS
60298+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60299+#else
60300+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
60301+#endif
60302+
60303+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60304+ !elf_check_arch(&elf_h) ||
60305+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60306+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60307+ return;
60308+
60309+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60310+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60311+ return;
60312+ switch (elf_p.p_type) {
60313+ case PT_DYNAMIC:
60314+ if (!is_textrel_rw && !is_textrel_rx)
60315+ continue;
60316+ i = 0UL;
60317+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
60318+ elf_dyn dyn;
60319+
60320+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
60321+ break;
60322+ if (dyn.d_tag == DT_NULL)
60323+ break;
60324+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
60325+ gr_log_textrel(vma);
60326+ if (is_textrel_rw)
60327+ vma->vm_flags |= VM_MAYWRITE;
60328+ else
60329+					/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
60330+ vma->vm_flags &= ~VM_MAYWRITE;
60331+ break;
60332+ }
60333+ i++;
60334+ }
60335+ is_textrel_rw = false;
60336+ is_textrel_rx = false;
60337+ continue;
60338+
60339+ case PT_GNU_RELRO:
60340+ if (!is_relro)
60341+ continue;
60342+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
60343+ vma->vm_flags &= ~VM_MAYWRITE;
60344+ is_relro = false;
60345+ continue;
60346+
60347+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60348+ case PT_PAX_FLAGS: {
60349+ const char *msg_mprotect = "", *msg_emutramp = "";
60350+ char *buffer_lib, *buffer_exe;
60351+
60352+ if (elf_p.p_flags & PF_NOMPROTECT)
60353+ msg_mprotect = "MPROTECT disabled";
60354+
60355+#ifdef CONFIG_PAX_EMUTRAMP
60356+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
60357+ msg_emutramp = "EMUTRAMP enabled";
60358+#endif
60359+
60360+ if (!msg_mprotect[0] && !msg_emutramp[0])
60361+ continue;
60362+
60363+ if (!printk_ratelimit())
60364+ continue;
60365+
60366+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
60367+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
60368+ if (buffer_lib && buffer_exe) {
60369+ char *path_lib, *path_exe;
60370+
60371+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
60372+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
60373+
60374+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
60375+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
60376+
60377+ }
60378+ free_page((unsigned long)buffer_exe);
60379+ free_page((unsigned long)buffer_lib);
60380+ continue;
60381+ }
60382+#endif
60383+
60384+ }
60385+ }
60386+}
60387+#endif
60388+
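The flag patterns whitelisted above correspond to the mprotect() dance a dynamic linker performs on a non-PIC (TEXTREL) object; `readelf -d lib.so | grep TEXTREL` shows whether an object needs it at all. In miniature:

	/* what ld.so does to relocate a TEXTREL text segment (sketch;
	 * text must be page-aligned) */
	#include <sys/mman.h>

	static void relocate_textrel(void *text, size_t len)
	{
		mprotect(text, len, PROT_READ | PROT_WRITE);	/* matched by is_textrel_rw */
		/* ... apply relocations in place ... */
		mprotect(text, len, PROT_READ | PROT_EXEC);	/* matched by is_textrel_rx */
	}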
60389+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60390+
60391+extern int grsec_enable_log_rwxmaps;
60392+
60393+static void elf_handle_mmap(struct file *file)
60394+{
60395+ struct elfhdr elf_h;
60396+ struct elf_phdr elf_p;
60397+ unsigned long i;
60398+
60399+ if (!grsec_enable_log_rwxmaps)
60400+ return;
60401+
60402+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60403+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60404+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
60405+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60406+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60407+ return;
60408+
60409+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60410+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60411+ return;
60412+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
60413+ gr_log_ptgnustack(file);
60414+ }
60415+}
60416+#endif
60417+
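elf_handle_mmap() only logs mappings of objects whose PT_GNU_STACK header requests an executable stack (`readelf -lW` shows the same flags). The condition can be reproduced from userspace; a runnable sketch for 64-bit ELF files, with no validation beyond the reads themselves:

	/* report whether an ELF64 file asks for an executable stack */
	#include <elf.h>
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		Elf64_Ehdr eh;
		Elf64_Phdr ph;
		FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

		if (!f || fread(&eh, sizeof(eh), 1, f) != 1)
			return 1;
		for (int i = 0; i < eh.e_phnum; i++) {
			if (fseek(f, (long)(eh.e_phoff + i * sizeof(ph)), SEEK_SET) ||
			    fread(&ph, sizeof(ph), 1, f) != 1)
				return 1;
			if (ph.p_type == PT_GNU_STACK)
				printf("GNU_STACK %sexecutable\n",
				       (ph.p_flags & PF_X) ? "" : "not ");
		}
		fclose(f);
		return 0;
	}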
60418 static int __init init_elf_binfmt(void)
60419 {
60420 register_binfmt(&elf_format);
60421diff --git a/fs/block_dev.c b/fs/block_dev.c
60422index 975266b..c3d1856 100644
60423--- a/fs/block_dev.c
60424+++ b/fs/block_dev.c
60425@@ -734,7 +734,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
60426 else if (bdev->bd_contains == bdev)
60427 return true; /* is a whole device which isn't held */
60428
60429- else if (whole->bd_holder == bd_may_claim)
60430+ else if (whole->bd_holder == (void *)bd_may_claim)
60431 return true; /* is a partition of a device that is being partitioned */
60432 else if (whole->bd_holder != NULL)
60433 return false; /* is a partition of a held device */
60434diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
60435index 6d67f32..8f33187 100644
60436--- a/fs/btrfs/ctree.c
60437+++ b/fs/btrfs/ctree.c
60438@@ -1181,9 +1181,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
60439 free_extent_buffer(buf);
60440 add_root_to_dirty_list(root);
60441 } else {
60442- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
60443- parent_start = parent->start;
60444- else
60445+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
60446+ if (parent)
60447+ parent_start = parent->start;
60448+ else
60449+ parent_start = 0;
60450+ } else
60451 parent_start = 0;
60452
60453 WARN_ON(trans->transid != btrfs_header_generation(parent));
60454diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
60455index 82f0c7c..dff78a8 100644
60456--- a/fs/btrfs/delayed-inode.c
60457+++ b/fs/btrfs/delayed-inode.c
60458@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
60459
60460 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
60461 {
60462- int seq = atomic_inc_return(&delayed_root->items_seq);
60463+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
60464 if ((atomic_dec_return(&delayed_root->items) <
60465 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
60466 waitqueue_active(&delayed_root->wait))
60467@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
60468
60469 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
60470 {
60471- int val = atomic_read(&delayed_root->items_seq);
60472+ int val = atomic_read_unchecked(&delayed_root->items_seq);
60473
60474 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
60475 return 1;
60476@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
60477 int seq;
60478 int ret;
60479
60480- seq = atomic_read(&delayed_root->items_seq);
60481+ seq = atomic_read_unchecked(&delayed_root->items_seq);
60482
60483 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
60484 if (ret)
60485diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
60486index f70119f..ab5894d 100644
60487--- a/fs/btrfs/delayed-inode.h
60488+++ b/fs/btrfs/delayed-inode.h
60489@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
60490 */
60491 struct list_head prepare_list;
60492 atomic_t items; /* for delayed items */
60493- atomic_t items_seq; /* for delayed items */
60494+ atomic_unchecked_t items_seq; /* for delayed items */
60495 int nodes; /* for delayed nodes */
60496 wait_queue_head_t wait;
60497 };
60498@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
60499 struct btrfs_delayed_root *delayed_root)
60500 {
60501 atomic_set(&delayed_root->items, 0);
60502- atomic_set(&delayed_root->items_seq, 0);
60503+ atomic_set_unchecked(&delayed_root->items_seq, 0);
60504 delayed_root->nodes = 0;
60505 spin_lock_init(&delayed_root->lock);
60506 init_waitqueue_head(&delayed_root->wait);
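items_seq is the first of many counters in this section converted to atomic_unchecked_t. Under PAX_REFCOUNT, plain atomic_t arithmetic traps on overflow to stop reference-count wraps; counters that may legitimately wrap (sequence numbers, statistics) opt out through the _unchecked variants. Sketch of the type, assumed to match its definition elsewhere in the patch:

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	/* atomic_read()/atomic_inc() semantics, minus the overflow trap */
	#define atomic_read_unchecked(v)	(*(volatile const int *)&(v)->counter)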
60507diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
60508index 05fef19..f3774b8 100644
60509--- a/fs/btrfs/super.c
60510+++ b/fs/btrfs/super.c
60511@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
60512 function, line, errstr);
60513 return;
60514 }
60515- ACCESS_ONCE(trans->transaction->aborted) = errno;
60516+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
60517 /* Wake up anybody who may be waiting on this transaction */
60518 wake_up(&root->fs_info->transaction_wait);
60519 wake_up(&root->fs_info->transaction_blocked_wait);
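ACCESS_ONCE_RW pairs with a hardened ACCESS_ONCE: the patch makes the plain macro yield a const-qualified lvalue, so stray writes through it fail to compile and writes must opt in explicitly. The pair, assumed to match the compiler.h hunk elsewhere in the patch:

	#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))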
60520diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
60521index 94edb0a..e94dc93 100644
60522--- a/fs/btrfs/sysfs.c
60523+++ b/fs/btrfs/sysfs.c
60524@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
60525 for (set = 0; set < FEAT_MAX; set++) {
60526 int i;
60527 struct attribute *attrs[2];
60528- struct attribute_group agroup = {
60529+ attribute_group_no_const agroup = {
60530 .name = "features",
60531 .attrs = attrs,
60532 };
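The constify gcc plugin makes structures composed largely of function pointers const by default, which would break this runtime-initialized, on-stack group; the _no_const typedef is the opt-out. Its likely definition (an assumption):

	typedef struct attribute_group __no_const attribute_group_no_const;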
60533diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
60534index 2299bfd..4098e72 100644
60535--- a/fs/btrfs/tests/free-space-tests.c
60536+++ b/fs/btrfs/tests/free-space-tests.c
60537@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
60538 * extent entry.
60539 */
60540 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
60541- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
60542+ pax_open_kernel();
60543+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
60544+ pax_close_kernel();
60545
60546 /*
60547 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
60548@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
60549 if (ret)
60550 return ret;
60551
60552- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
60553+ pax_open_kernel();
60554+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
60555+ pax_close_kernel();
60556 __btrfs_remove_free_space_cache(cache->free_space_ctl);
60557
60558 return 0;
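With the ops structure constified into read-only memory under KERNEXEC, the test harness can no longer assign through it directly; writes are bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86, roughly by toggling CR0.WP with preemption disabled — an assumption about the implementation). The idiom:

	/* write to an otherwise read-only, constified structure;
	 * ops and replacement_fn are placeholders */
	pax_open_kernel();
	*(void **)&ops->use_bitmap = replacement_fn;
	pax_close_kernel();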
60559diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
60560index 154990c..d0cf699 100644
60561--- a/fs/btrfs/tree-log.h
60562+++ b/fs/btrfs/tree-log.h
60563@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
60564 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
60565 struct btrfs_trans_handle *trans)
60566 {
60567- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
60568+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
60569 }
60570
60571 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
60572diff --git a/fs/buffer.c b/fs/buffer.c
60573index 20805db..2e8fc69 100644
60574--- a/fs/buffer.c
60575+++ b/fs/buffer.c
60576@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
60577 bh_cachep = kmem_cache_create("buffer_head",
60578 sizeof(struct buffer_head), 0,
60579 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
60580- SLAB_MEM_SPREAD),
60581+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
60582 NULL);
60583
60584 /*
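SLAB_NO_SANITIZE is the opt-out from PAX_MEMORY_SANITIZE, which otherwise wipes objects on free; buffer_head is a hot cache with non-sensitive contents, so it skips the wipe for performance. The general pattern:

	/* opt a hot, non-sensitive cache out of sanitize-on-free (sketch;
	 * cachep and size are placeholders) */
	cachep = kmem_cache_create("hot_cache", size, 0,
				   SLAB_MEM_SPREAD | SLAB_NO_SANITIZE, NULL);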
60585diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
60586index fbb08e9..0fda764 100644
60587--- a/fs/cachefiles/bind.c
60588+++ b/fs/cachefiles/bind.c
60589@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
60590 args);
60591
60592 /* start by checking things over */
60593- ASSERT(cache->fstop_percent >= 0 &&
60594- cache->fstop_percent < cache->fcull_percent &&
60595+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
60596 cache->fcull_percent < cache->frun_percent &&
60597 cache->frun_percent < 100);
60598
60599- ASSERT(cache->bstop_percent >= 0 &&
60600- cache->bstop_percent < cache->bcull_percent &&
60601+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
60602 cache->bcull_percent < cache->brun_percent &&
60603 cache->brun_percent < 100);
60604
60605diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
60606index f601def..b2cf704 100644
60607--- a/fs/cachefiles/daemon.c
60608+++ b/fs/cachefiles/daemon.c
60609@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
60610 if (n > buflen)
60611 return -EMSGSIZE;
60612
60613- if (copy_to_user(_buffer, buffer, n) != 0)
60614+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
60615 return -EFAULT;
60616
60617 return n;
60618@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
60619 if (test_bit(CACHEFILES_DEAD, &cache->flags))
60620 return -EIO;
60621
60622- if (datalen < 0 || datalen > PAGE_SIZE - 1)
60623+ if (datalen > PAGE_SIZE - 1)
60624 return -EOPNOTSUPP;
60625
60626 /* drag the command string into the kernel so we can parse it */
60627@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
60628 if (args[0] != '%' || args[1] != '\0')
60629 return -EINVAL;
60630
60631- if (fstop < 0 || fstop >= cache->fcull_percent)
60632+ if (fstop >= cache->fcull_percent)
60633 return cachefiles_daemon_range_error(cache, args);
60634
60635 cache->fstop_percent = fstop;
60636@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
60637 if (args[0] != '%' || args[1] != '\0')
60638 return -EINVAL;
60639
60640- if (bstop < 0 || bstop >= cache->bcull_percent)
60641+ if (bstop >= cache->bcull_percent)
60642 return cachefiles_daemon_range_error(cache, args);
60643
60644 cache->bstop_percent = bstop;
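The deleted `< 0` comparisons became dead code once the percent fields and their parsed values were made unsigned, and the read path gains an explicit `n > sizeof(buffer)` bound instead. The hazard with signed-style checks on unsigned values, in miniature (limit is a placeholder):

	unsigned long fstop = (unsigned long)-1;	/* wrapped "negative" input */

	if (fstop < 0)		/* always false for an unsigned type */
		/* never reached */;
	if (fstop >= limit)	/* the wrapped value is caught here instead */
		return -EINVAL;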
60645diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
60646index 8c52472..c4e3a69 100644
60647--- a/fs/cachefiles/internal.h
60648+++ b/fs/cachefiles/internal.h
60649@@ -66,7 +66,7 @@ struct cachefiles_cache {
60650 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
60651 struct rb_root active_nodes; /* active nodes (can't be culled) */
60652 rwlock_t active_lock; /* lock for active_nodes */
60653- atomic_t gravecounter; /* graveyard uniquifier */
60654+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
60655 unsigned frun_percent; /* when to stop culling (% files) */
60656 unsigned fcull_percent; /* when to start culling (% files) */
60657 unsigned fstop_percent; /* when to stop allocating (% files) */
60658@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
60659 * proc.c
60660 */
60661 #ifdef CONFIG_CACHEFILES_HISTOGRAM
60662-extern atomic_t cachefiles_lookup_histogram[HZ];
60663-extern atomic_t cachefiles_mkdir_histogram[HZ];
60664-extern atomic_t cachefiles_create_histogram[HZ];
60665+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60666+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60667+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
60668
60669 extern int __init cachefiles_proc_init(void);
60670 extern void cachefiles_proc_cleanup(void);
60671 static inline
60672-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
60673+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
60674 {
60675 unsigned long jif = jiffies - start_jif;
60676 if (jif >= HZ)
60677 jif = HZ - 1;
60678- atomic_inc(&histogram[jif]);
60679+ atomic_inc_unchecked(&histogram[jif]);
60680 }
60681
60682 #else
60683diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
60684index 1e51714..411eded 100644
60685--- a/fs/cachefiles/namei.c
60686+++ b/fs/cachefiles/namei.c
60687@@ -309,7 +309,7 @@ try_again:
60688 /* first step is to make up a grave dentry in the graveyard */
60689 sprintf(nbuffer, "%08x%08x",
60690 (uint32_t) get_seconds(),
60691- (uint32_t) atomic_inc_return(&cache->gravecounter));
60692+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
60693
60694 /* do the multiway lock magic */
60695 trap = lock_rename(cache->graveyard, dir);
60696diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
60697index eccd339..4c1d995 100644
60698--- a/fs/cachefiles/proc.c
60699+++ b/fs/cachefiles/proc.c
60700@@ -14,9 +14,9 @@
60701 #include <linux/seq_file.h>
60702 #include "internal.h"
60703
60704-atomic_t cachefiles_lookup_histogram[HZ];
60705-atomic_t cachefiles_mkdir_histogram[HZ];
60706-atomic_t cachefiles_create_histogram[HZ];
60707+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60708+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60709+atomic_unchecked_t cachefiles_create_histogram[HZ];
60710
60711 /*
60712 * display the latency histogram
60713@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
60714 return 0;
60715 default:
60716 index = (unsigned long) v - 3;
60717- x = atomic_read(&cachefiles_lookup_histogram[index]);
60718- y = atomic_read(&cachefiles_mkdir_histogram[index]);
60719- z = atomic_read(&cachefiles_create_histogram[index]);
60720+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
60721+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
60722+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
60723 if (x == 0 && y == 0 && z == 0)
60724 return 0;
60725
60726diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
60727index 83e9976..bfd1eee 100644
60728--- a/fs/ceph/dir.c
60729+++ b/fs/ceph/dir.c
60730@@ -127,6 +127,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
60731 struct dentry *dentry, *last;
60732 struct ceph_dentry_info *di;
60733 int err = 0;
60734+ char d_name[DNAME_INLINE_LEN];
60735+ const unsigned char *name;
60736
60737 /* claim ref on last dentry we returned */
60738 last = fi->dentry;
60739@@ -190,7 +192,12 @@ more:
60740
60741 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
60742 dentry, dentry, dentry->d_inode);
60743- if (!dir_emit(ctx, dentry->d_name.name,
60744+ name = dentry->d_name.name;
60745+ if (name == dentry->d_iname) {
60746+ memcpy(d_name, name, dentry->d_name.len);
60747+ name = d_name;
60748+ }
60749+ if (!dir_emit(ctx, name,
60750 dentry->d_name.len,
60751 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
60752 dentry->d_inode->i_mode >> 12)) {
60753@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
60754 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
60755 struct ceph_mds_client *mdsc = fsc->mdsc;
60756 unsigned frag = fpos_frag(ctx->pos);
60757- int off = fpos_off(ctx->pos);
60758+ unsigned int off = fpos_off(ctx->pos);
60759 int err;
60760 u32 ftype;
60761 struct ceph_mds_reply_info_parsed *rinfo;
60762diff --git a/fs/ceph/super.c b/fs/ceph/super.c
60763index a63997b..ddc0577 100644
60764--- a/fs/ceph/super.c
60765+++ b/fs/ceph/super.c
60766@@ -889,7 +889,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
60767 /*
60768 * construct our own bdi so we can control readahead, etc.
60769 */
60770-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
60771+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
60772
60773 static int ceph_register_bdi(struct super_block *sb,
60774 struct ceph_fs_client *fsc)
60775@@ -906,7 +906,7 @@ static int ceph_register_bdi(struct super_block *sb,
60776 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
60777
60778 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
60779- atomic_long_inc_return(&bdi_seq));
60780+ atomic_long_inc_return_unchecked(&bdi_seq));
60781 if (!err)
60782 sb->s_bdi = &fsc->backing_dev_info;
60783 return err;
60784diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
60785index 7febcf2..62a5721 100644
60786--- a/fs/cifs/cifs_debug.c
60787+++ b/fs/cifs/cifs_debug.c
60788@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60789
60790 if (strtobool(&c, &bv) == 0) {
60791 #ifdef CONFIG_CIFS_STATS2
60792- atomic_set(&totBufAllocCount, 0);
60793- atomic_set(&totSmBufAllocCount, 0);
60794+ atomic_set_unchecked(&totBufAllocCount, 0);
60795+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60796 #endif /* CONFIG_CIFS_STATS2 */
60797 spin_lock(&cifs_tcp_ses_lock);
60798 list_for_each(tmp1, &cifs_tcp_ses_list) {
60799@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60800 tcon = list_entry(tmp3,
60801 struct cifs_tcon,
60802 tcon_list);
60803- atomic_set(&tcon->num_smbs_sent, 0);
60804+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
60805 if (server->ops->clear_stats)
60806 server->ops->clear_stats(tcon);
60807 }
60808@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60809 smBufAllocCount.counter, cifs_min_small);
60810 #ifdef CONFIG_CIFS_STATS2
60811 seq_printf(m, "Total Large %d Small %d Allocations\n",
60812- atomic_read(&totBufAllocCount),
60813- atomic_read(&totSmBufAllocCount));
60814+ atomic_read_unchecked(&totBufAllocCount),
60815+ atomic_read_unchecked(&totSmBufAllocCount));
60816 #endif /* CONFIG_CIFS_STATS2 */
60817
60818 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
60819@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60820 if (tcon->need_reconnect)
60821 seq_puts(m, "\tDISCONNECTED ");
60822 seq_printf(m, "\nSMBs: %d",
60823- atomic_read(&tcon->num_smbs_sent));
60824+ atomic_read_unchecked(&tcon->num_smbs_sent));
60825 if (server->ops->print_stats)
60826 server->ops->print_stats(m, tcon);
60827 }
60828diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
60829index d72fe37..ded5511 100644
60830--- a/fs/cifs/cifsfs.c
60831+++ b/fs/cifs/cifsfs.c
60832@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
60833 */
60834 cifs_req_cachep = kmem_cache_create("cifs_request",
60835 CIFSMaxBufSize + max_hdr_size, 0,
60836- SLAB_HWCACHE_ALIGN, NULL);
60837+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
60838 if (cifs_req_cachep == NULL)
60839 return -ENOMEM;
60840
60841@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
60842 efficient to alloc 1 per page off the slab compared to 17K (5-page)
60843 alloc of large cifs buffers even when page debugging is on */
60844 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
60845- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
60846+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
60847 NULL);
60848 if (cifs_sm_req_cachep == NULL) {
60849 mempool_destroy(cifs_req_poolp);
60850@@ -1204,8 +1204,8 @@ init_cifs(void)
60851 atomic_set(&bufAllocCount, 0);
60852 atomic_set(&smBufAllocCount, 0);
60853 #ifdef CONFIG_CIFS_STATS2
60854- atomic_set(&totBufAllocCount, 0);
60855- atomic_set(&totSmBufAllocCount, 0);
60856+ atomic_set_unchecked(&totBufAllocCount, 0);
60857+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60858 #endif /* CONFIG_CIFS_STATS2 */
60859
60860 atomic_set(&midCount, 0);
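SLAB_USERCOPY is the PAX_USERCOPY whitelist flag: only caches carrying it may serve as the source or destination of copy_to_user()/copy_from_user(), and CIFS request buffers are copied to and from userspace wholesale, hence the opt-in. The creation pattern:

	/* whitelist a cache whose objects legitimately cross to userspace
	 * (sketch; cachep and size are placeholders) */
	cachep = kmem_cache_create("example_rq", size, 0,
				   SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);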
60861diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
60862index 22b289a..bbbba08 100644
60863--- a/fs/cifs/cifsglob.h
60864+++ b/fs/cifs/cifsglob.h
60865@@ -823,35 +823,35 @@ struct cifs_tcon {
60866 __u16 Flags; /* optional support bits */
60867 enum statusEnum tidStatus;
60868 #ifdef CONFIG_CIFS_STATS
60869- atomic_t num_smbs_sent;
60870+ atomic_unchecked_t num_smbs_sent;
60871 union {
60872 struct {
60873- atomic_t num_writes;
60874- atomic_t num_reads;
60875- atomic_t num_flushes;
60876- atomic_t num_oplock_brks;
60877- atomic_t num_opens;
60878- atomic_t num_closes;
60879- atomic_t num_deletes;
60880- atomic_t num_mkdirs;
60881- atomic_t num_posixopens;
60882- atomic_t num_posixmkdirs;
60883- atomic_t num_rmdirs;
60884- atomic_t num_renames;
60885- atomic_t num_t2renames;
60886- atomic_t num_ffirst;
60887- atomic_t num_fnext;
60888- atomic_t num_fclose;
60889- atomic_t num_hardlinks;
60890- atomic_t num_symlinks;
60891- atomic_t num_locks;
60892- atomic_t num_acl_get;
60893- atomic_t num_acl_set;
60894+ atomic_unchecked_t num_writes;
60895+ atomic_unchecked_t num_reads;
60896+ atomic_unchecked_t num_flushes;
60897+ atomic_unchecked_t num_oplock_brks;
60898+ atomic_unchecked_t num_opens;
60899+ atomic_unchecked_t num_closes;
60900+ atomic_unchecked_t num_deletes;
60901+ atomic_unchecked_t num_mkdirs;
60902+ atomic_unchecked_t num_posixopens;
60903+ atomic_unchecked_t num_posixmkdirs;
60904+ atomic_unchecked_t num_rmdirs;
60905+ atomic_unchecked_t num_renames;
60906+ atomic_unchecked_t num_t2renames;
60907+ atomic_unchecked_t num_ffirst;
60908+ atomic_unchecked_t num_fnext;
60909+ atomic_unchecked_t num_fclose;
60910+ atomic_unchecked_t num_hardlinks;
60911+ atomic_unchecked_t num_symlinks;
60912+ atomic_unchecked_t num_locks;
60913+ atomic_unchecked_t num_acl_get;
60914+ atomic_unchecked_t num_acl_set;
60915 } cifs_stats;
60916 #ifdef CONFIG_CIFS_SMB2
60917 struct {
60918- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60919- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60920+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60921+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60922 } smb2_stats;
60923 #endif /* CONFIG_CIFS_SMB2 */
60924 } stats;
60925@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
60926 }
60927
60928 #ifdef CONFIG_CIFS_STATS
60929-#define cifs_stats_inc atomic_inc
60930+#define cifs_stats_inc atomic_inc_unchecked
60931
60932 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60933 unsigned int bytes)
60934@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60935 /* Various Debug counters */
60936 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60937 #ifdef CONFIG_CIFS_STATS2
60938-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60939-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60940+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60941+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60942 #endif
60943 GLOBAL_EXTERN atomic_t smBufAllocCount;
60944 GLOBAL_EXTERN atomic_t midCount;
60945diff --git a/fs/cifs/file.c b/fs/cifs/file.c
60946index ca30c39..570fb94 100644
60947--- a/fs/cifs/file.c
60948+++ b/fs/cifs/file.c
60949@@ -2055,10 +2055,14 @@ static int cifs_writepages(struct address_space *mapping,
60950 index = mapping->writeback_index; /* Start from prev offset */
60951 end = -1;
60952 } else {
60953- index = wbc->range_start >> PAGE_CACHE_SHIFT;
60954- end = wbc->range_end >> PAGE_CACHE_SHIFT;
60955- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
60956+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
60957 range_whole = true;
60958+ index = 0;
60959+ end = ULONG_MAX;
60960+ } else {
60961+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
60962+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
60963+ }
60964 scanned = true;
60965 }
60966 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
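Besides making the whole-range bounds explicit, the reordering avoids a truncating shift: with wbc->range_end == LLONG_MAX,

	LLONG_MAX >> PAGE_CACHE_SHIFT = 0x0007ffffffffffff	(4 KiB pages)

does not fit a 32-bit pgoff_t, so index and end are now set to 0 and ULONG_MAX directly before any shift is attempted (the truncation rationale is an inference from the shape of the fix).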
60967diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
60968index 3379463..3af418a 100644
60969--- a/fs/cifs/misc.c
60970+++ b/fs/cifs/misc.c
60971@@ -170,7 +170,7 @@ cifs_buf_get(void)
60972 memset(ret_buf, 0, buf_size + 3);
60973 atomic_inc(&bufAllocCount);
60974 #ifdef CONFIG_CIFS_STATS2
60975- atomic_inc(&totBufAllocCount);
60976+ atomic_inc_unchecked(&totBufAllocCount);
60977 #endif /* CONFIG_CIFS_STATS2 */
60978 }
60979
60980@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
60981 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
60982 atomic_inc(&smBufAllocCount);
60983 #ifdef CONFIG_CIFS_STATS2
60984- atomic_inc(&totSmBufAllocCount);
60985+ atomic_inc_unchecked(&totSmBufAllocCount);
60986 #endif /* CONFIG_CIFS_STATS2 */
60987
60988 }
60989diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
60990index d297903..1cb7516 100644
60991--- a/fs/cifs/smb1ops.c
60992+++ b/fs/cifs/smb1ops.c
60993@@ -622,27 +622,27 @@ static void
60994 cifs_clear_stats(struct cifs_tcon *tcon)
60995 {
60996 #ifdef CONFIG_CIFS_STATS
60997- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
60998- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
60999- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
61000- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61001- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
61002- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
61003- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61004- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
61005- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
61006- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
61007- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
61008- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
61009- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
61010- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
61011- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
61012- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
61013- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
61014- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
61015- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
61016- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
61017- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
61018+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
61019+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
61020+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
61021+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
61022+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
61023+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
61024+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
61025+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
61026+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
61027+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
61028+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
61029+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
61030+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
61031+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
61032+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
61033+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
61034+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
61035+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
61036+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
61037+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
61038+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
61039 #endif
61040 }
61041
61042@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61043 {
61044 #ifdef CONFIG_CIFS_STATS
61045 seq_printf(m, " Oplocks breaks: %d",
61046- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
61047+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
61048 seq_printf(m, "\nReads: %d Bytes: %llu",
61049- atomic_read(&tcon->stats.cifs_stats.num_reads),
61050+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
61051 (long long)(tcon->bytes_read));
61052 seq_printf(m, "\nWrites: %d Bytes: %llu",
61053- atomic_read(&tcon->stats.cifs_stats.num_writes),
61054+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
61055 (long long)(tcon->bytes_written));
61056 seq_printf(m, "\nFlushes: %d",
61057- atomic_read(&tcon->stats.cifs_stats.num_flushes));
61058+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
61059 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
61060- atomic_read(&tcon->stats.cifs_stats.num_locks),
61061- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
61062- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
61063+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
61064+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
61065+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
61066 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
61067- atomic_read(&tcon->stats.cifs_stats.num_opens),
61068- atomic_read(&tcon->stats.cifs_stats.num_closes),
61069- atomic_read(&tcon->stats.cifs_stats.num_deletes));
61070+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
61071+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
61072+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
61073 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
61074- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
61075- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
61076+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
61077+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
61078 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
61079- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
61080- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
61081+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
61082+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
61083 seq_printf(m, "\nRenames: %d T2 Renames %d",
61084- atomic_read(&tcon->stats.cifs_stats.num_renames),
61085- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
61086+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
61087+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
61088 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
61089- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
61090- atomic_read(&tcon->stats.cifs_stats.num_fnext),
61091- atomic_read(&tcon->stats.cifs_stats.num_fclose));
61092+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
61093+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
61094+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
61095 #endif
61096 }
61097
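
The cifssmb.c hunks above apply one mechanical transformation: per-tcon statistics counters move from atomic_t to atomic_unchecked_t, the grsecurity/PaX variant that opts out of PAX_REFCOUNT overflow detection, since a wrapping statistic is harmless and must not be treated as a refcount overflow. A minimal sketch of the pattern, assuming the atomic_unchecked_t API this patch adds; the counter and function names are illustrative, not from the patched tree:

    #include <linux/atomic.h>
    #include <linux/seq_file.h>

    /* illustrative counter, not from the patched tree */
    static atomic_unchecked_t example_num_reads = ATOMIC_INIT(0);

    static void example_note_read(void)
    {
            /* may wrap by design, hence the _unchecked flavour */
            atomic_inc_unchecked(&example_num_reads);
    }

    static void example_print_stats(struct seq_file *m)
    {
            seq_printf(m, "Reads: %d\n",
                       atomic_read_unchecked(&example_num_reads));
    }

The smb2ops.c hunks that follow repeat the same substitution for the per-command SMB2 counters, including the atomic_t * locals that alias them.
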
61098diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
61099index eab05e1..ffe5ea4 100644
61100--- a/fs/cifs/smb2ops.c
61101+++ b/fs/cifs/smb2ops.c
61102@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
61103 #ifdef CONFIG_CIFS_STATS
61104 int i;
61105 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
61106- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61107- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61108+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61109+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61110 }
61111 #endif
61112 }
61113@@ -459,65 +459,65 @@ static void
61114 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61115 {
61116 #ifdef CONFIG_CIFS_STATS
61117- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61118- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61119+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61120+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61121 seq_printf(m, "\nNegotiates: %d sent %d failed",
61122- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
61123- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
61124+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
61125+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
61126 seq_printf(m, "\nSessionSetups: %d sent %d failed",
61127- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
61128- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
61129+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
61130+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
61131 seq_printf(m, "\nLogoffs: %d sent %d failed",
61132- atomic_read(&sent[SMB2_LOGOFF_HE]),
61133- atomic_read(&failed[SMB2_LOGOFF_HE]));
61134+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
61135+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
61136 seq_printf(m, "\nTreeConnects: %d sent %d failed",
61137- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
61138- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
61139+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
61140+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
61141 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
61142- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
61143- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
61144+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
61145+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
61146 seq_printf(m, "\nCreates: %d sent %d failed",
61147- atomic_read(&sent[SMB2_CREATE_HE]),
61148- atomic_read(&failed[SMB2_CREATE_HE]));
61149+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
61150+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
61151 seq_printf(m, "\nCloses: %d sent %d failed",
61152- atomic_read(&sent[SMB2_CLOSE_HE]),
61153- atomic_read(&failed[SMB2_CLOSE_HE]));
61154+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
61155+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
61156 seq_printf(m, "\nFlushes: %d sent %d failed",
61157- atomic_read(&sent[SMB2_FLUSH_HE]),
61158- atomic_read(&failed[SMB2_FLUSH_HE]));
61159+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
61160+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
61161 seq_printf(m, "\nReads: %d sent %d failed",
61162- atomic_read(&sent[SMB2_READ_HE]),
61163- atomic_read(&failed[SMB2_READ_HE]));
61164+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
61165+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
61166 seq_printf(m, "\nWrites: %d sent %d failed",
61167- atomic_read(&sent[SMB2_WRITE_HE]),
61168- atomic_read(&failed[SMB2_WRITE_HE]));
61169+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
61170+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
61171 seq_printf(m, "\nLocks: %d sent %d failed",
61172- atomic_read(&sent[SMB2_LOCK_HE]),
61173- atomic_read(&failed[SMB2_LOCK_HE]));
61174+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
61175+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
61176 seq_printf(m, "\nIOCTLs: %d sent %d failed",
61177- atomic_read(&sent[SMB2_IOCTL_HE]),
61178- atomic_read(&failed[SMB2_IOCTL_HE]));
61179+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
61180+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
61181 seq_printf(m, "\nCancels: %d sent %d failed",
61182- atomic_read(&sent[SMB2_CANCEL_HE]),
61183- atomic_read(&failed[SMB2_CANCEL_HE]));
61184+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
61185+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
61186 seq_printf(m, "\nEchos: %d sent %d failed",
61187- atomic_read(&sent[SMB2_ECHO_HE]),
61188- atomic_read(&failed[SMB2_ECHO_HE]));
61189+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
61190+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
61191 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
61192- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
61193- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
61194+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
61195+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
61196 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
61197- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
61198- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
61199+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
61200+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
61201 seq_printf(m, "\nQueryInfos: %d sent %d failed",
61202- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
61203- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
61204+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
61205+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
61206 seq_printf(m, "\nSetInfos: %d sent %d failed",
61207- atomic_read(&sent[SMB2_SET_INFO_HE]),
61208- atomic_read(&failed[SMB2_SET_INFO_HE]));
61209+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
61210+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
61211 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
61212- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
61213- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
61214+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
61215+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
61216 #endif
61217 }
61218
61219diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
61220index 65cd7a8..3518676 100644
61221--- a/fs/cifs/smb2pdu.c
61222+++ b/fs/cifs/smb2pdu.c
61223@@ -2147,8 +2147,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
61224 default:
61225 cifs_dbg(VFS, "info level %u isn't supported\n",
61226 srch_inf->info_level);
61227- rc = -EINVAL;
61228- goto qdir_exit;
61229+ return -EINVAL;
61230 }
61231
61232 req->FileIndex = cpu_to_le32(index);
61233diff --git a/fs/coda/cache.c b/fs/coda/cache.c
61234index 46ee6f2..89a9e7f 100644
61235--- a/fs/coda/cache.c
61236+++ b/fs/coda/cache.c
61237@@ -24,7 +24,7 @@
61238 #include "coda_linux.h"
61239 #include "coda_cache.h"
61240
61241-static atomic_t permission_epoch = ATOMIC_INIT(0);
61242+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
61243
61244 /* replace or extend an acl cache hit */
61245 void coda_cache_enter(struct inode *inode, int mask)
61246@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
61247 struct coda_inode_info *cii = ITOC(inode);
61248
61249 spin_lock(&cii->c_lock);
61250- cii->c_cached_epoch = atomic_read(&permission_epoch);
61251+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
61252 if (!uid_eq(cii->c_uid, current_fsuid())) {
61253 cii->c_uid = current_fsuid();
61254 cii->c_cached_perm = mask;
61255@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
61256 {
61257 struct coda_inode_info *cii = ITOC(inode);
61258 spin_lock(&cii->c_lock);
61259- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
61260+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
61261 spin_unlock(&cii->c_lock);
61262 }
61263
61264 /* remove all acl caches */
61265 void coda_cache_clear_all(struct super_block *sb)
61266 {
61267- atomic_inc(&permission_epoch);
61268+ atomic_inc_unchecked(&permission_epoch);
61269 }
61270
61271
61272@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
61273 spin_lock(&cii->c_lock);
61274 hit = (mask & cii->c_cached_perm) == mask &&
61275 uid_eq(cii->c_uid, current_fsuid()) &&
61276- cii->c_cached_epoch == atomic_read(&permission_epoch);
61277+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
61278 spin_unlock(&cii->c_lock);
61279
61280 return hit;
61281diff --git a/fs/compat.c b/fs/compat.c
61282index 6fd272d..dd34ba2 100644
61283--- a/fs/compat.c
61284+++ b/fs/compat.c
61285@@ -54,7 +54,7 @@
61286 #include <asm/ioctls.h>
61287 #include "internal.h"
61288
61289-int compat_log = 1;
61290+int compat_log = 0;
61291
61292 int compat_printk(const char *fmt, ...)
61293 {
61294@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
61295
61296 set_fs(KERNEL_DS);
61297 /* The __user pointer cast is valid because of the set_fs() */
61298- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
61299+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
61300 set_fs(oldfs);
61301 /* truncating is ok because it's a user address */
61302 if (!ret)
61303@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
61304 goto out;
61305
61306 ret = -EINVAL;
61307- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
61308+ if (nr_segs > UIO_MAXIOV)
61309 goto out;
61310 if (nr_segs > fast_segs) {
61311 ret = -ENOMEM;
61312@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
61313 struct compat_readdir_callback {
61314 struct dir_context ctx;
61315 struct compat_old_linux_dirent __user *dirent;
61316+ struct file * file;
61317 int result;
61318 };
61319
61320@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
61321 buf->result = -EOVERFLOW;
61322 return -EOVERFLOW;
61323 }
61324+
61325+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61326+ return 0;
61327+
61328 buf->result++;
61329 dirent = buf->dirent;
61330 if (!access_ok(VERIFY_WRITE, dirent,
61331@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61332 if (!f.file)
61333 return -EBADF;
61334
61335+ buf.file = f.file;
61336 error = iterate_dir(f.file, &buf.ctx);
61337 if (buf.result)
61338 error = buf.result;
61339@@ -913,6 +919,7 @@ struct compat_getdents_callback {
61340 struct dir_context ctx;
61341 struct compat_linux_dirent __user *current_dir;
61342 struct compat_linux_dirent __user *previous;
61343+ struct file * file;
61344 int count;
61345 int error;
61346 };
61347@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
61348 buf->error = -EOVERFLOW;
61349 return -EOVERFLOW;
61350 }
61351+
61352+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61353+ return 0;
61354+
61355 dirent = buf->previous;
61356 if (dirent) {
61357 if (__put_user(offset, &dirent->d_off))
61358@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
61359 if (!f.file)
61360 return -EBADF;
61361
61362+ buf.file = f.file;
61363 error = iterate_dir(f.file, &buf.ctx);
61364 if (error >= 0)
61365 error = buf.error;
61366@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
61367 struct dir_context ctx;
61368 struct linux_dirent64 __user *current_dir;
61369 struct linux_dirent64 __user *previous;
61370+ struct file * file;
61371 int count;
61372 int error;
61373 };
61374@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
61375 buf->error = -EINVAL; /* only used if we fail.. */
61376 if (reclen > buf->count)
61377 return -EINVAL;
61378+
61379+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61380+ return 0;
61381+
61382 dirent = buf->previous;
61383
61384 if (dirent) {
61385@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61386 if (!f.file)
61387 return -EBADF;
61388
61389+ buf.file = f.file;
61390 error = iterate_dir(f.file, &buf.ctx);
61391 if (error >= 0)
61392 error = buf.error;
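
All three compat readdir paths above gain the same two-part hook: the opened directory's struct file is stashed in the callback state, and each filldir callback asks gr_acl_handle_filldir() whether the entry may be shown, silently skipping it otherwise so hidden entries produce no error. A condensed sketch, assuming the gr_acl_handle_filldir() helper and headers this patch provides; the struct and function names are illustrative:

    /* kernel-internal sketch; assumes the patched tree's headers */
    struct example_readdir_callback {
            struct dir_context ctx;
            struct file *file;      /* added so the callback can consult the ACL */
            int result;
    };

    static int example_fillonedir(struct dir_context *ctx, const char *name,
                                  int namlen, loff_t offset, u64 ino,
                                  unsigned int d_type)
    {
            struct example_readdir_callback *buf =
                    container_of(ctx, struct example_readdir_callback, ctx);

            /* entries denied by policy are simply omitted from the listing */
            if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
                    return 0;

            buf->result++;
            /* ... copy the entry out to userspace as before ... */
            return 0;
    }

The caller just sets buf.file = f.file before iterate_dir(), exactly as the hunks do.
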
61393diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
61394index 4d24d17..4f8c09e 100644
61395--- a/fs/compat_binfmt_elf.c
61396+++ b/fs/compat_binfmt_elf.c
61397@@ -30,11 +30,13 @@
61398 #undef elf_phdr
61399 #undef elf_shdr
61400 #undef elf_note
61401+#undef elf_dyn
61402 #undef elf_addr_t
61403 #define elfhdr elf32_hdr
61404 #define elf_phdr elf32_phdr
61405 #define elf_shdr elf32_shdr
61406 #define elf_note elf32_note
61407+#define elf_dyn Elf32_Dyn
61408 #define elf_addr_t Elf32_Addr
61409
61410 /*
61411diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
61412index afec645..9c65620 100644
61413--- a/fs/compat_ioctl.c
61414+++ b/fs/compat_ioctl.c
61415@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
61416 return -EFAULT;
61417 if (__get_user(udata, &ss32->iomem_base))
61418 return -EFAULT;
61419- ss.iomem_base = compat_ptr(udata);
61420+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
61421 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
61422 __get_user(ss.port_high, &ss32->port_high))
61423 return -EFAULT;
61424@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
61425 for (i = 0; i < nmsgs; i++) {
61426 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
61427 return -EFAULT;
61428- if (get_user(datap, &umsgs[i].buf) ||
61429- put_user(compat_ptr(datap), &tmsgs[i].buf))
61430+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
61431+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
61432 return -EFAULT;
61433 }
61434 return sys_ioctl(fd, cmd, (unsigned long)tdata);
61435@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
61436 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
61437 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
61438 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
61439- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
61440+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
61441 return -EFAULT;
61442
61443 return ioctl_preallocate(file, p);
61444@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
61445 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
61446 {
61447 unsigned int a, b;
61448- a = *(unsigned int *)p;
61449- b = *(unsigned int *)q;
61450+ a = *(const unsigned int *)p;
61451+ b = *(const unsigned int *)q;
61452 if (a > b)
61453 return 1;
61454 if (a < b)
61455diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
61456index cf0db00..c7f70e8 100644
61457--- a/fs/configfs/dir.c
61458+++ b/fs/configfs/dir.c
61459@@ -1540,7 +1540,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61460 }
61461 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
61462 struct configfs_dirent *next;
61463- const char *name;
61464+ const unsigned char * name;
61465+ char d_name[sizeof(next->s_dentry->d_iname)];
61466 int len;
61467 struct inode *inode = NULL;
61468
61469@@ -1549,7 +1550,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61470 continue;
61471
61472 name = configfs_get_name(next);
61473- len = strlen(name);
61474+ if (next->s_dentry && name == next->s_dentry->d_iname) {
61475+ len = next->s_dentry->d_name.len;
61476+ memcpy(d_name, name, len);
61477+ name = d_name;
61478+ } else
61479+ len = strlen(name);
61480
61481 /*
61482 * We'll have a dentry and an inode for
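
The configfs_readdir() change above guards against a concurrent rename: when the returned name points at the dentry's inline d_iname storage, its bytes can change underneath strlen(), so the length is taken from d_name.len and the bytes are snapshotted into a stack buffer before use. An illustrative reduction of the guard, assuming the patched tree's headers; the helper name is invented:

    /* kernel-internal sketch; assumes the patched tree's headers */
    static int example_stable_name_len(struct dentry *dentry, const char **name,
                                       char *buf /* >= sizeof(dentry->d_iname) */)
    {
            int len;

            if (dentry && *name == dentry->d_iname) {
                    /* inline name: take a stable length and a private copy */
                    len = dentry->d_name.len;
                    memcpy(buf, *name, len);
                    *name = buf;
            } else
                    len = strlen(*name);
            return len;
    }
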
61483diff --git a/fs/coredump.c b/fs/coredump.c
61484index bbbe139..b76fae5 100644
61485--- a/fs/coredump.c
61486+++ b/fs/coredump.c
61487@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
61488 struct pipe_inode_info *pipe = file->private_data;
61489
61490 pipe_lock(pipe);
61491- pipe->readers++;
61492- pipe->writers--;
61493+ atomic_inc(&pipe->readers);
61494+ atomic_dec(&pipe->writers);
61495 wake_up_interruptible_sync(&pipe->wait);
61496 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61497 pipe_unlock(pipe);
61498@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
61499 * We actually want wait_event_freezable() but then we need
61500 * to clear TIF_SIGPENDING and improve dump_interrupted().
61501 */
61502- wait_event_interruptible(pipe->wait, pipe->readers == 1);
61503+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
61504
61505 pipe_lock(pipe);
61506- pipe->readers--;
61507- pipe->writers++;
61508+ atomic_dec(&pipe->readers);
61509+ atomic_inc(&pipe->writers);
61510 pipe_unlock(pipe);
61511 }
61512
61513@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
61514 struct files_struct *displaced;
61515 bool need_nonrelative = false;
61516 bool core_dumped = false;
61517- static atomic_t core_dump_count = ATOMIC_INIT(0);
61518+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
61519+ long signr = siginfo->si_signo;
61520+ int dumpable;
61521 struct coredump_params cprm = {
61522 .siginfo = siginfo,
61523 .regs = signal_pt_regs(),
61524@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
61525 .mm_flags = mm->flags,
61526 };
61527
61528- audit_core_dumps(siginfo->si_signo);
61529+ audit_core_dumps(signr);
61530+
61531+ dumpable = __get_dumpable(cprm.mm_flags);
61532+
61533+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
61534+ gr_handle_brute_attach(dumpable);
61535
61536 binfmt = mm->binfmt;
61537 if (!binfmt || !binfmt->core_dump)
61538 goto fail;
61539- if (!__get_dumpable(cprm.mm_flags))
61540+ if (!dumpable)
61541 goto fail;
61542
61543 cred = prepare_creds();
61544@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
61545 need_nonrelative = true;
61546 }
61547
61548- retval = coredump_wait(siginfo->si_signo, &core_state);
61549+ retval = coredump_wait(signr, &core_state);
61550 if (retval < 0)
61551 goto fail_creds;
61552
61553@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
61554 }
61555 cprm.limit = RLIM_INFINITY;
61556
61557- dump_count = atomic_inc_return(&core_dump_count);
61558+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
61559 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
61560 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
61561 task_tgid_vnr(current), current->comm);
61562@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
61563 } else {
61564 struct inode *inode;
61565
61566+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
61567+
61568 if (cprm.limit < binfmt->min_coredump)
61569 goto fail_unlock;
61570
61571@@ -681,7 +690,7 @@ close_fail:
61572 filp_close(cprm.file, NULL);
61573 fail_dropcount:
61574 if (ispipe)
61575- atomic_dec(&core_dump_count);
61576+ atomic_dec_unchecked(&core_dump_count);
61577 fail_unlock:
61578 kfree(cn.corename);
61579 coredump_finish(mm, core_dumped);
61580@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
61581 struct file *file = cprm->file;
61582 loff_t pos = file->f_pos;
61583 ssize_t n;
61584+
61585+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
61586 if (cprm->written + nr > cprm->limit)
61587 return 0;
61588 while (nr) {
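
Three related changes land in the coredump hunks above: the pipe reader/writer bookkeeping switches to atomic operations (the fields themselves become atomic_t elsewhere in this patch), the dump-pipe counter becomes atomic_unchecked_t because it is a statistic rather than a reference count, and crash-style signals are screened by the grsecurity brute-force detector before any dump work begins. A condensed sketch of the screening step; __get_dumpable() and the gr_* hook come from the patched tree, the function name is illustrative:

    /* kernel-internal sketch; assumes the patched tree's headers */
    static void example_screen_crash(const siginfo_t *siginfo,
                                     unsigned long mm_flags)
    {
            long signr = siginfo->si_signo;
            int dumpable = __get_dumpable(mm_flags);

            audit_core_dumps(signr);

            /* feed likely exploit-attempt crashes to the brute-force detector */
            if (signr == SIGSEGV || signr == SIGBUS ||
                signr == SIGKILL || signr == SIGILL)
                    gr_handle_brute_attach(dumpable);
    }
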
61589diff --git a/fs/dcache.c b/fs/dcache.c
61590index c71e373..5c1f656 100644
61591--- a/fs/dcache.c
61592+++ b/fs/dcache.c
61593@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
61594 * dentry_iput drops the locks, at which point nobody (except
61595 * transient RCU lookups) can reach this dentry.
61596 */
61597- BUG_ON(dentry->d_lockref.count > 0);
61598+ BUG_ON(__lockref_read(&dentry->d_lockref) > 0);
61599 this_cpu_dec(nr_dentry);
61600 if (dentry->d_op && dentry->d_op->d_release)
61601 dentry->d_op->d_release(dentry);
61602@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
61603 struct dentry *parent = dentry->d_parent;
61604 if (IS_ROOT(dentry))
61605 return NULL;
61606- if (unlikely(dentry->d_lockref.count < 0))
61607+ if (unlikely(__lockref_read(&dentry->d_lockref) < 0))
61608 return NULL;
61609 if (likely(spin_trylock(&parent->d_lock)))
61610 return parent;
61611@@ -626,8 +626,8 @@ static inline bool fast_dput(struct dentry *dentry)
61612 */
61613 if (unlikely(ret < 0)) {
61614 spin_lock(&dentry->d_lock);
61615- if (dentry->d_lockref.count > 1) {
61616- dentry->d_lockref.count--;
61617+ if (__lockref_read(&dentry->d_lockref) > 1) {
61618+ __lockref_dec(&dentry->d_lockref);
61619 spin_unlock(&dentry->d_lock);
61620 return 1;
61621 }
61622@@ -682,7 +682,7 @@ static inline bool fast_dput(struct dentry *dentry)
61623 * else could have killed it and marked it dead. Either way, we
61624 * don't need to do anything else.
61625 */
61626- if (dentry->d_lockref.count) {
61627+ if (__lockref_read(&dentry->d_lockref)) {
61628 spin_unlock(&dentry->d_lock);
61629 return 1;
61630 }
61631@@ -692,7 +692,7 @@ static inline bool fast_dput(struct dentry *dentry)
61632 * lock, and we just tested that it was zero, so we can just
61633 * set it to 1.
61634 */
61635- dentry->d_lockref.count = 1;
61636+ __lockref_set(&dentry->d_lockref, 1);
61637 return 0;
61638 }
61639
61640@@ -751,7 +751,7 @@ repeat:
61641 dentry->d_flags |= DCACHE_REFERENCED;
61642 dentry_lru_add(dentry);
61643
61644- dentry->d_lockref.count--;
61645+ __lockref_dec(&dentry->d_lockref);
61646 spin_unlock(&dentry->d_lock);
61647 return;
61648
61649@@ -766,7 +766,7 @@ EXPORT_SYMBOL(dput);
61650 /* This must be called with d_lock held */
61651 static inline void __dget_dlock(struct dentry *dentry)
61652 {
61653- dentry->d_lockref.count++;
61654+ __lockref_inc(&dentry->d_lockref);
61655 }
61656
61657 static inline void __dget(struct dentry *dentry)
61658@@ -807,8 +807,8 @@ repeat:
61659 goto repeat;
61660 }
61661 rcu_read_unlock();
61662- BUG_ON(!ret->d_lockref.count);
61663- ret->d_lockref.count++;
61664+ BUG_ON(!__lockref_read(&ret->d_lockref));
61665+ __lockref_inc(&ret->d_lockref);
61666 spin_unlock(&ret->d_lock);
61667 return ret;
61668 }
61669@@ -886,9 +886,9 @@ restart:
61670 spin_lock(&inode->i_lock);
61671 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
61672 spin_lock(&dentry->d_lock);
61673- if (!dentry->d_lockref.count) {
61674+ if (!__lockref_read(&dentry->d_lockref)) {
61675 struct dentry *parent = lock_parent(dentry);
61676- if (likely(!dentry->d_lockref.count)) {
61677+ if (likely(!__lockref_read(&dentry->d_lockref))) {
61678 __dentry_kill(dentry);
61679 dput(parent);
61680 goto restart;
61681@@ -923,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
61682 * We found an inuse dentry which was not removed from
61683 * the LRU because of laziness during lookup. Do not free it.
61684 */
61685- if (dentry->d_lockref.count > 0) {
61686+ if (__lockref_read(&dentry->d_lockref) > 0) {
61687 spin_unlock(&dentry->d_lock);
61688 if (parent)
61689 spin_unlock(&parent->d_lock);
61690@@ -961,8 +961,8 @@ static void shrink_dentry_list(struct list_head *list)
61691 dentry = parent;
61692 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
61693 parent = lock_parent(dentry);
61694- if (dentry->d_lockref.count != 1) {
61695- dentry->d_lockref.count--;
61696+ if (__lockref_read(&dentry->d_lockref) != 1) {
61697+ __lockref_dec(&dentry->d_lockref);
61698 spin_unlock(&dentry->d_lock);
61699 if (parent)
61700 spin_unlock(&parent->d_lock);
61701@@ -1002,7 +1002,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
61702 * counts, just remove them from the LRU. Otherwise give them
61703 * another pass through the LRU.
61704 */
61705- if (dentry->d_lockref.count) {
61706+ if (__lockref_read(&dentry->d_lockref)) {
61707 d_lru_isolate(lru, dentry);
61708 spin_unlock(&dentry->d_lock);
61709 return LRU_REMOVED;
61710@@ -1336,7 +1336,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
61711 } else {
61712 if (dentry->d_flags & DCACHE_LRU_LIST)
61713 d_lru_del(dentry);
61714- if (!dentry->d_lockref.count) {
61715+ if (!__lockref_read(&dentry->d_lockref)) {
61716 d_shrink_add(dentry, &data->dispose);
61717 data->found++;
61718 }
61719@@ -1384,7 +1384,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
61720 return D_WALK_CONTINUE;
61721
61722 /* root with refcount 1 is fine */
61723- if (dentry == _data && dentry->d_lockref.count == 1)
61724+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
61725 return D_WALK_CONTINUE;
61726
61727 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
61728@@ -1393,7 +1393,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
61729 dentry->d_inode ?
61730 dentry->d_inode->i_ino : 0UL,
61731 dentry,
61732- dentry->d_lockref.count,
61733+ __lockref_read(&dentry->d_lockref),
61734 dentry->d_sb->s_type->name,
61735 dentry->d_sb->s_id);
61736 WARN_ON(1);
61737@@ -1534,7 +1534,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61738 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
61739 if (name->len > DNAME_INLINE_LEN-1) {
61740 size_t size = offsetof(struct external_name, name[1]);
61741- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
61742+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
61743 if (!p) {
61744 kmem_cache_free(dentry_cache, dentry);
61745 return NULL;
61746@@ -1557,7 +1557,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61747 smp_wmb();
61748 dentry->d_name.name = dname;
61749
61750- dentry->d_lockref.count = 1;
61751+ __lockref_set(&dentry->d_lockref, 1);
61752 dentry->d_flags = 0;
61753 spin_lock_init(&dentry->d_lock);
61754 seqcount_init(&dentry->d_seq);
61755@@ -1566,6 +1566,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61756 dentry->d_sb = sb;
61757 dentry->d_op = NULL;
61758 dentry->d_fsdata = NULL;
61759+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
61760+ atomic_set(&dentry->chroot_refcnt, 0);
61761+#endif
61762 INIT_HLIST_BL_NODE(&dentry->d_hash);
61763 INIT_LIST_HEAD(&dentry->d_lru);
61764 INIT_LIST_HEAD(&dentry->d_subdirs);
61765@@ -2290,7 +2293,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
61766 goto next;
61767 }
61768
61769- dentry->d_lockref.count++;
61770+ __lockref_inc(&dentry->d_lockref);
61771 found = dentry;
61772 spin_unlock(&dentry->d_lock);
61773 break;
61774@@ -2358,7 +2361,7 @@ again:
61775 spin_lock(&dentry->d_lock);
61776 inode = dentry->d_inode;
61777 isdir = S_ISDIR(inode->i_mode);
61778- if (dentry->d_lockref.count == 1) {
61779+ if (__lockref_read(&dentry->d_lockref) == 1) {
61780 if (!spin_trylock(&inode->i_lock)) {
61781 spin_unlock(&dentry->d_lock);
61782 cpu_relax();
61783@@ -3311,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
61784
61785 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
61786 dentry->d_flags |= DCACHE_GENOCIDE;
61787- dentry->d_lockref.count--;
61788+ __lockref_dec(&dentry->d_lockref);
61789 }
61790 }
61791 return D_WALK_CONTINUE;
61792@@ -3427,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
61793 mempages -= reserve;
61794
61795 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
61796- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
61797+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
61798+ SLAB_NO_SANITIZE, NULL);
61799
61800 dcache_init();
61801 inode_init();
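
Every d_lockref.count access in the dcache.c hunks above is rewritten through accessor helpers (__lockref_read/_inc/_dec/_set, supplied elsewhere in this patch), so dcache no longer assumes how the count is represented. The substitutions follow a fixed table; an illustrative helper showing each one, assuming those accessors and the patched tree's headers:

    /* kernel-internal sketch; assumes the patched tree's headers */
    static void example_lockref_usage(struct dentry *dentry)
    {
            __lockref_set(&dentry->d_lockref, 1);   /* was: d_lockref.count = 1 */
            __lockref_inc(&dentry->d_lockref);      /* was: d_lockref.count++   */
            __lockref_dec(&dentry->d_lockref);      /* was: d_lockref.count--   */
            BUG_ON(__lockref_read(&dentry->d_lockref) != 1); /* was: d_lockref.count */
    }
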
61802diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
61803index 96400ab..906103d 100644
61804--- a/fs/debugfs/inode.c
61805+++ b/fs/debugfs/inode.c
61806@@ -386,6 +386,10 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
61807 }
61808 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
61809
61810+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61811+extern int grsec_enable_sysfs_restrict;
61812+#endif
61813+
61814 /**
61815 * debugfs_create_dir - create a directory in the debugfs filesystem
61816 * @name: a pointer to a string containing the name of the directory to
61817@@ -404,6 +408,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
61818 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
61819 * returned.
61820 */
61821+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61822+extern int grsec_enable_sysfs_restrict;
61823+#endif
61824+
61825 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
61826 {
61827 struct dentry *dentry = start_creating(name, parent);
61828@@ -416,7 +424,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
61829 if (unlikely(!inode))
61830 return failed_creating(dentry);
61831
61832- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
61833+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61834+ if (grsec_enable_sysfs_restrict)
61835+ inode->i_mode = S_IFDIR | S_IRWXU;
61836+ else
61837+#endif
61838+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
61839 inode->i_op = &simple_dir_inode_operations;
61840 inode->i_fop = &simple_dir_operations;
61841
61842diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
61843index b08b518..d6acffa 100644
61844--- a/fs/ecryptfs/inode.c
61845+++ b/fs/ecryptfs/inode.c
61846@@ -663,7 +663,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
61847 old_fs = get_fs();
61848 set_fs(get_ds());
61849 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
61850- (char __user *)lower_buf,
61851+ (char __force_user *)lower_buf,
61852 PATH_MAX);
61853 set_fs(old_fs);
61854 if (rc < 0)
61855diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
61856index e4141f2..d8263e8 100644
61857--- a/fs/ecryptfs/miscdev.c
61858+++ b/fs/ecryptfs/miscdev.c
61859@@ -304,7 +304,7 @@ check_list:
61860 goto out_unlock_msg_ctx;
61861 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
61862 if (msg_ctx->msg) {
61863- if (copy_to_user(&buf[i], packet_length, packet_length_size))
61864+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
61865 goto out_unlock_msg_ctx;
61866 i += packet_length_size;
61867 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
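
The miscdev hunk above adds a bounds check: packet_length is a small on-stack buffer while packet_length_size is computed at runtime, so the size is validated against the buffer before copy_to_user() could read past its end. An illustrative reduction; example_encode_length() is an invented stand-in for the runtime encoder:

    /* kernel-internal sketch; assumes the patched tree's headers */
    static int example_send_packet_length(char __user *dst)
    {
            char packet_length[3];
            size_t packet_length_size = example_encode_length(packet_length);

            /* never read past the on-stack buffer, whatever the encoder claims */
            if (packet_length_size > sizeof(packet_length) ||
                copy_to_user(dst, packet_length, packet_length_size))
                    return -EFAULT;
            return 0;
    }
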
61868diff --git a/fs/exec.c b/fs/exec.c
61869index 00400cf..b9dca28 100644
61870--- a/fs/exec.c
61871+++ b/fs/exec.c
61872@@ -56,8 +56,20 @@
61873 #include <linux/pipe_fs_i.h>
61874 #include <linux/oom.h>
61875 #include <linux/compat.h>
61876+#include <linux/random.h>
61877+#include <linux/seq_file.h>
61878+#include <linux/coredump.h>
61879+#include <linux/mman.h>
61880+
61881+#ifdef CONFIG_PAX_REFCOUNT
61882+#include <linux/kallsyms.h>
61883+#include <linux/kdebug.h>
61884+#endif
61885+
61886+#include <trace/events/fs.h>
61887
61888 #include <asm/uaccess.h>
61889+#include <asm/sections.h>
61890 #include <asm/mmu_context.h>
61891 #include <asm/tlb.h>
61892
61893@@ -66,19 +78,34 @@
61894
61895 #include <trace/events/sched.h>
61896
61897+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61898+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
61899+{
61900+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61901+}
61902+#endif
61903+
61904+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61905+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61906+EXPORT_SYMBOL(pax_set_initial_flags_func);
61907+#endif
61908+
61909 int suid_dumpable = 0;
61910
61911 static LIST_HEAD(formats);
61912 static DEFINE_RWLOCK(binfmt_lock);
61913
61914+extern int gr_process_kernel_exec_ban(void);
61915+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61916+
61917 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61918 {
61919 BUG_ON(!fmt);
61920 if (WARN_ON(!fmt->load_binary))
61921 return;
61922 write_lock(&binfmt_lock);
61923- insert ? list_add(&fmt->lh, &formats) :
61924- list_add_tail(&fmt->lh, &formats);
61925+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61926+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61927 write_unlock(&binfmt_lock);
61928 }
61929
61930@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61931 void unregister_binfmt(struct linux_binfmt * fmt)
61932 {
61933 write_lock(&binfmt_lock);
61934- list_del(&fmt->lh);
61935+ pax_list_del((struct list_head *)&fmt->lh);
61936 write_unlock(&binfmt_lock);
61937 }
61938
61939@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61940 int write)
61941 {
61942 struct page *page;
61943- int ret;
61944
61945-#ifdef CONFIG_STACK_GROWSUP
61946- if (write) {
61947- ret = expand_downwards(bprm->vma, pos);
61948- if (ret < 0)
61949- return NULL;
61950- }
61951-#endif
61952- ret = get_user_pages(current, bprm->mm, pos,
61953- 1, write, 1, &page, NULL);
61954- if (ret <= 0)
61955+ if (0 > expand_downwards(bprm->vma, pos))
61956+ return NULL;
61957+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61958 return NULL;
61959
61960 if (write) {
61961@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61962 if (size <= ARG_MAX)
61963 return page;
61964
61965+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61966+ // only allow 512KB for argv+env on suid/sgid binaries
61967+ // to prevent easy ASLR exhaustion
61968+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61969+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61970+ (size > (512 * 1024))) {
61971+ put_page(page);
61972+ return NULL;
61973+ }
61974+#endif
61975+
61976 /*
61977 * Limit to 1/4-th the stack size for the argv+env strings.
61978 * This ensures that:
61979@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61980 vma->vm_end = STACK_TOP_MAX;
61981 vma->vm_start = vma->vm_end - PAGE_SIZE;
61982 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61983+
61984+#ifdef CONFIG_PAX_SEGMEXEC
61985+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61986+#endif
61987+
61988 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61989 INIT_LIST_HEAD(&vma->anon_vma_chain);
61990
61991@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61992 arch_bprm_mm_init(mm, vma);
61993 up_write(&mm->mmap_sem);
61994 bprm->p = vma->vm_end - sizeof(void *);
61995+
61996+#ifdef CONFIG_PAX_RANDUSTACK
61997+ if (randomize_va_space)
61998+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61999+#endif
62000+
62001 return 0;
62002 err:
62003 up_write(&mm->mmap_sem);
62004@@ -396,7 +437,7 @@ struct user_arg_ptr {
62005 } ptr;
62006 };
62007
62008-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62009+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62010 {
62011 const char __user *native;
62012
62013@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
62014 compat_uptr_t compat;
62015
62016 if (get_user(compat, argv.ptr.compat + nr))
62017- return ERR_PTR(-EFAULT);
62018+ return (const char __force_user *)ERR_PTR(-EFAULT);
62019
62020 return compat_ptr(compat);
62021 }
62022 #endif
62023
62024 if (get_user(native, argv.ptr.native + nr))
62025- return ERR_PTR(-EFAULT);
62026+ return (const char __force_user *)ERR_PTR(-EFAULT);
62027
62028 return native;
62029 }
62030@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
62031 if (!p)
62032 break;
62033
62034- if (IS_ERR(p))
62035+ if (IS_ERR((const char __force_kernel *)p))
62036 return -EFAULT;
62037
62038 if (i >= max)
62039@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
62040
62041 ret = -EFAULT;
62042 str = get_user_arg_ptr(argv, argc);
62043- if (IS_ERR(str))
62044+ if (IS_ERR((const char __force_kernel *)str))
62045 goto out;
62046
62047 len = strnlen_user(str, MAX_ARG_STRLEN);
62048@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
62049 int r;
62050 mm_segment_t oldfs = get_fs();
62051 struct user_arg_ptr argv = {
62052- .ptr.native = (const char __user *const __user *)__argv,
62053+ .ptr.native = (const char __user * const __force_user *)__argv,
62054 };
62055
62056 set_fs(KERNEL_DS);
62057@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62058 unsigned long new_end = old_end - shift;
62059 struct mmu_gather tlb;
62060
62061- BUG_ON(new_start > new_end);
62062+ if (new_start >= new_end || new_start < mmap_min_addr)
62063+ return -ENOMEM;
62064
62065 /*
62066 * ensure there are no vmas between where we want to go
62067@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
62068 if (vma != find_vma(mm, new_start))
62069 return -EFAULT;
62070
62071+#ifdef CONFIG_PAX_SEGMEXEC
62072+ BUG_ON(pax_find_mirror_vma(vma));
62073+#endif
62074+
62075 /*
62076 * cover the whole range: [new_start, old_end)
62077 */
62078@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62079 stack_top = arch_align_stack(stack_top);
62080 stack_top = PAGE_ALIGN(stack_top);
62081
62082- if (unlikely(stack_top < mmap_min_addr) ||
62083- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
62084- return -ENOMEM;
62085-
62086 stack_shift = vma->vm_end - stack_top;
62087
62088 bprm->p -= stack_shift;
62089@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
62090 bprm->exec -= stack_shift;
62091
62092 down_write(&mm->mmap_sem);
62093+
62094+ /* Move stack pages down in memory. */
62095+ if (stack_shift) {
62096+ ret = shift_arg_pages(vma, stack_shift);
62097+ if (ret)
62098+ goto out_unlock;
62099+ }
62100+
62101 vm_flags = VM_STACK_FLAGS;
62102
62103+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62104+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62105+ vm_flags &= ~VM_EXEC;
62106+
62107+#ifdef CONFIG_PAX_MPROTECT
62108+ if (mm->pax_flags & MF_PAX_MPROTECT)
62109+ vm_flags &= ~VM_MAYEXEC;
62110+#endif
62111+
62112+ }
62113+#endif
62114+
62115 /*
62116 * Adjust stack execute permissions; explicitly enable for
62117 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
62118@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
62119 goto out_unlock;
62120 BUG_ON(prev != vma);
62121
62122- /* Move stack pages down in memory. */
62123- if (stack_shift) {
62124- ret = shift_arg_pages(vma, stack_shift);
62125- if (ret)
62126- goto out_unlock;
62127- }
62128-
62129 /* mprotect_fixup is overkill to remove the temporary stack flags */
62130 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
62131
62132@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
62133 #endif
62134 current->mm->start_stack = bprm->p;
62135 ret = expand_stack(vma, stack_base);
62136+
62137+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
62138+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
62139+ unsigned long size;
62140+ vm_flags_t vm_flags;
62141+
62142+ size = STACK_TOP - vma->vm_end;
62143+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
62144+
62145+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
62146+
62147+#ifdef CONFIG_X86
62148+ if (!ret) {
62149+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
62150+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
62151+ }
62152+#endif
62153+
62154+ }
62155+#endif
62156+
62157 if (ret)
62158 ret = -EFAULT;
62159
62160@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
62161 if (err)
62162 goto exit;
62163
62164- if (name->name[0] != '\0')
62165+ if (name->name[0] != '\0') {
62166 fsnotify_open(file);
62167+ trace_open_exec(name->name);
62168+ }
62169
62170 out:
62171 return file;
62172@@ -815,7 +893,7 @@ int kernel_read(struct file *file, loff_t offset,
62173 old_fs = get_fs();
62174 set_fs(get_ds());
62175 /* The cast to a user pointer is valid due to the set_fs() */
62176- result = vfs_read(file, (void __user *)addr, count, &pos);
62177+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
62178 set_fs(old_fs);
62179 return result;
62180 }
62181@@ -860,6 +938,7 @@ static int exec_mmap(struct mm_struct *mm)
62182 tsk->mm = mm;
62183 tsk->active_mm = mm;
62184 activate_mm(active_mm, mm);
62185+ populate_stack();
62186 tsk->mm->vmacache_seqnum = 0;
62187 vmacache_flush(tsk);
62188 task_unlock(tsk);
62189@@ -926,10 +1005,14 @@ static int de_thread(struct task_struct *tsk)
62190 if (!thread_group_leader(tsk)) {
62191 struct task_struct *leader = tsk->group_leader;
62192
62193- sig->notify_count = -1; /* for exit_notify() */
62194 for (;;) {
62195 threadgroup_change_begin(tsk);
62196 write_lock_irq(&tasklist_lock);
62197+ /*
62198+ * Do this under tasklist_lock to ensure that
62199+ * exit_notify() can't miss ->group_exit_task
62200+ */
62201+ sig->notify_count = -1;
62202 if (likely(leader->exit_state))
62203 break;
62204 __set_current_state(TASK_KILLABLE);
62205@@ -1258,7 +1341,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
62206 }
62207 rcu_read_unlock();
62208
62209- if (p->fs->users > n_fs)
62210+ if (atomic_read(&p->fs->users) > n_fs)
62211 bprm->unsafe |= LSM_UNSAFE_SHARE;
62212 else
62213 p->fs->in_exec = 1;
62214@@ -1459,6 +1542,31 @@ static int exec_binprm(struct linux_binprm *bprm)
62215 return ret;
62216 }
62217
62218+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62219+static DEFINE_PER_CPU(u64, exec_counter);
62220+static int __init init_exec_counters(void)
62221+{
62222+ unsigned int cpu;
62223+
62224+ for_each_possible_cpu(cpu) {
62225+ per_cpu(exec_counter, cpu) = (u64)cpu;
62226+ }
62227+
62228+ return 0;
62229+}
62230+early_initcall(init_exec_counters);
62231+static inline void increment_exec_counter(void)
62232+{
62233+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
62234+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
62235+}
62236+#else
62237+static inline void increment_exec_counter(void) {}
62238+#endif
62239+
62240+extern void gr_handle_exec_args(struct linux_binprm *bprm,
62241+ struct user_arg_ptr argv);
62242+
62243 /*
62244 * sys_execve() executes a new program.
62245 */
62246@@ -1467,6 +1575,11 @@ static int do_execveat_common(int fd, struct filename *filename,
62247 struct user_arg_ptr envp,
62248 int flags)
62249 {
62250+#ifdef CONFIG_GRKERNSEC
62251+ struct file *old_exec_file;
62252+ struct acl_subject_label *old_acl;
62253+ struct rlimit old_rlim[RLIM_NLIMITS];
62254+#endif
62255 char *pathbuf = NULL;
62256 struct linux_binprm *bprm;
62257 struct file *file;
62258@@ -1476,6 +1589,8 @@ static int do_execveat_common(int fd, struct filename *filename,
62259 if (IS_ERR(filename))
62260 return PTR_ERR(filename);
62261
62262+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
62263+
62264 /*
62265 * We move the actual failure in case of RLIMIT_NPROC excess from
62266 * set*uid() to execve() because too many poorly written programs
62267@@ -1513,6 +1628,11 @@ static int do_execveat_common(int fd, struct filename *filename,
62268 if (IS_ERR(file))
62269 goto out_unmark;
62270
62271+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
62272+ retval = -EPERM;
62273+ goto out_unmark;
62274+ }
62275+
62276 sched_exec();
62277
62278 bprm->file = file;
62279@@ -1539,6 +1659,11 @@ static int do_execveat_common(int fd, struct filename *filename,
62280 }
62281 bprm->interp = bprm->filename;
62282
62283+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
62284+ retval = -EACCES;
62285+ goto out_unmark;
62286+ }
62287+
62288 retval = bprm_mm_init(bprm);
62289 if (retval)
62290 goto out_unmark;
62291@@ -1555,24 +1680,70 @@ static int do_execveat_common(int fd, struct filename *filename,
62292 if (retval < 0)
62293 goto out;
62294
62295+#ifdef CONFIG_GRKERNSEC
62296+ old_acl = current->acl;
62297+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
62298+ old_exec_file = current->exec_file;
62299+ get_file(file);
62300+ current->exec_file = file;
62301+#endif
62302+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62303+ /* limit suid stack to 8MB
62304+ * we saved the old limits above and will restore them if this exec fails
62305+ */
62306+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
62307+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
62308+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
62309+#endif
62310+
62311+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
62312+ retval = -EPERM;
62313+ goto out_fail;
62314+ }
62315+
62316+ if (!gr_tpe_allow(file)) {
62317+ retval = -EACCES;
62318+ goto out_fail;
62319+ }
62320+
62321+ if (gr_check_crash_exec(file)) {
62322+ retval = -EACCES;
62323+ goto out_fail;
62324+ }
62325+
62326+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
62327+ bprm->unsafe);
62328+ if (retval < 0)
62329+ goto out_fail;
62330+
62331 retval = copy_strings_kernel(1, &bprm->filename, bprm);
62332 if (retval < 0)
62333- goto out;
62334+ goto out_fail;
62335
62336 bprm->exec = bprm->p;
62337 retval = copy_strings(bprm->envc, envp, bprm);
62338 if (retval < 0)
62339- goto out;
62340+ goto out_fail;
62341
62342 retval = copy_strings(bprm->argc, argv, bprm);
62343 if (retval < 0)
62344- goto out;
62345+ goto out_fail;
62346+
62347+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
62348+
62349+ gr_handle_exec_args(bprm, argv);
62350
62351 retval = exec_binprm(bprm);
62352 if (retval < 0)
62353- goto out;
62354+ goto out_fail;
62355+#ifdef CONFIG_GRKERNSEC
62356+ if (old_exec_file)
62357+ fput(old_exec_file);
62358+#endif
62359
62360 /* execve succeeded */
62361+
62362+ increment_exec_counter();
62363 current->fs->in_exec = 0;
62364 current->in_execve = 0;
62365 acct_update_integrals(current);
62366@@ -1584,6 +1755,14 @@ static int do_execveat_common(int fd, struct filename *filename,
62367 put_files_struct(displaced);
62368 return retval;
62369
62370+out_fail:
62371+#ifdef CONFIG_GRKERNSEC
62372+ current->acl = old_acl;
62373+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
62374+ fput(current->exec_file);
62375+ current->exec_file = old_exec_file;
62376+#endif
62377+
62378 out:
62379 if (bprm->mm) {
62380 acct_arg_size(bprm, 0);
62381@@ -1730,3 +1909,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
62382 argv, envp, flags);
62383 }
62384 #endif
62385+
62386+int pax_check_flags(unsigned long *flags)
62387+{
62388+ int retval = 0;
62389+
62390+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
62391+ if (*flags & MF_PAX_SEGMEXEC)
62392+ {
62393+ *flags &= ~MF_PAX_SEGMEXEC;
62394+ retval = -EINVAL;
62395+ }
62396+#endif
62397+
62398+ if ((*flags & MF_PAX_PAGEEXEC)
62399+
62400+#ifdef CONFIG_PAX_PAGEEXEC
62401+ && (*flags & MF_PAX_SEGMEXEC)
62402+#endif
62403+
62404+ )
62405+ {
62406+ *flags &= ~MF_PAX_PAGEEXEC;
62407+ retval = -EINVAL;
62408+ }
62409+
62410+ if ((*flags & MF_PAX_MPROTECT)
62411+
62412+#ifdef CONFIG_PAX_MPROTECT
62413+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62414+#endif
62415+
62416+ )
62417+ {
62418+ *flags &= ~MF_PAX_MPROTECT;
62419+ retval = -EINVAL;
62420+ }
62421+
62422+ if ((*flags & MF_PAX_EMUTRAMP)
62423+
62424+#ifdef CONFIG_PAX_EMUTRAMP
62425+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62426+#endif
62427+
62428+ )
62429+ {
62430+ *flags &= ~MF_PAX_EMUTRAMP;
62431+ retval = -EINVAL;
62432+ }
62433+
62434+ return retval;
62435+}
62436+
62437+EXPORT_SYMBOL(pax_check_flags);
62438+
62439+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62440+char *pax_get_path(const struct path *path, char *buf, int buflen)
62441+{
62442+ char *pathname = d_path(path, buf, buflen);
62443+
62444+ if (IS_ERR(pathname))
62445+ goto toolong;
62446+
62447+ pathname = mangle_path(buf, pathname, "\t\n\\");
62448+ if (!pathname)
62449+ goto toolong;
62450+
62451+ *pathname = 0;
62452+ return buf;
62453+
62454+toolong:
62455+ return "<path too long>";
62456+}
62457+EXPORT_SYMBOL(pax_get_path);
62458+
62459+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
62460+{
62461+ struct task_struct *tsk = current;
62462+ struct mm_struct *mm = current->mm;
62463+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
62464+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
62465+ char *path_exec = NULL;
62466+ char *path_fault = NULL;
62467+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
62468+ siginfo_t info = { };
62469+
62470+ if (buffer_exec && buffer_fault) {
62471+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
62472+
62473+ down_read(&mm->mmap_sem);
62474+ vma = mm->mmap;
62475+ while (vma && (!vma_exec || !vma_fault)) {
62476+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
62477+ vma_exec = vma;
62478+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
62479+ vma_fault = vma;
62480+ vma = vma->vm_next;
62481+ }
62482+ if (vma_exec)
62483+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
62484+ if (vma_fault) {
62485+ start = vma_fault->vm_start;
62486+ end = vma_fault->vm_end;
62487+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
62488+ if (vma_fault->vm_file)
62489+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
62490+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
62491+ path_fault = "<heap>";
62492+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
62493+ path_fault = "<stack>";
62494+ else
62495+ path_fault = "<anonymous mapping>";
62496+ }
62497+ up_read(&mm->mmap_sem);
62498+ }
62499+ if (tsk->signal->curr_ip)
62500+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
62501+ else
62502+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
62503+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
62504+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
62505+ free_page((unsigned long)buffer_exec);
62506+ free_page((unsigned long)buffer_fault);
62507+ pax_report_insns(regs, pc, sp);
62508+ info.si_signo = SIGKILL;
62509+ info.si_errno = 0;
62510+ info.si_code = SI_KERNEL;
62511+ info.si_pid = 0;
62512+ info.si_uid = 0;
62513+ do_coredump(&info);
62514+}
62515+#endif
62516+
62517+#ifdef CONFIG_PAX_REFCOUNT
62518+void pax_report_refcount_overflow(struct pt_regs *regs)
62519+{
62520+ if (current->signal->curr_ip)
62521+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
62522+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
62523+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62524+ else
62525+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
62526+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62527+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
62528+ preempt_disable();
62529+ show_regs(regs);
62530+ preempt_enable();
62531+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
62532+}
62533+#endif
62534+
62535+#ifdef CONFIG_PAX_USERCOPY
62536+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
62537+static noinline int check_stack_object(const void *obj, unsigned long len)
62538+{
62539+ const void * const stack = task_stack_page(current);
62540+ const void * const stackend = stack + THREAD_SIZE;
62541+
62542+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62543+ const void *frame = NULL;
62544+ const void *oldframe;
62545+#endif
62546+
62547+ if (obj + len < obj)
62548+ return -1;
62549+
62550+ if (obj + len <= stack || stackend <= obj)
62551+ return 0;
62552+
62553+ if (obj < stack || stackend < obj + len)
62554+ return -1;
62555+
62556+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62557+ oldframe = __builtin_frame_address(1);
62558+ if (oldframe)
62559+ frame = __builtin_frame_address(2);
62560+ /*
62561+ low ----------------------------------------------> high
62562+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
62563+ ^----------------^
62564+ allow copies only within here
62565+ */
62566+ while (stack <= frame && frame < stackend) {
62567+ /* if obj + len extends past the last frame, this
62568+ check won't pass and the next frame will be 0,
62569+ causing us to bail out and correctly report
62570+ the copy as invalid
62571+ */
62572+ if (obj + len <= frame)
62573+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
62574+ oldframe = frame;
62575+ frame = *(const void * const *)frame;
62576+ }
62577+ return -1;
62578+#else
62579+ return 1;
62580+#endif
62581+}
62582+
62583+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
62584+{
62585+ if (current->signal->curr_ip)
62586+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62587+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62588+ else
62589+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62590+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62591+ dump_stack();
62592+ gr_handle_kernel_exploit();
62593+ do_group_exit(SIGKILL);
62594+}
62595+#endif
62596+
62597+#ifdef CONFIG_PAX_USERCOPY
62598+
62599+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
62600+{
62601+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62602+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
62603+#ifdef CONFIG_MODULES
62604+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
62605+#else
62606+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
62607+#endif
62608+
62609+#else
62610+ unsigned long textlow = (unsigned long)_stext;
62611+ unsigned long texthigh = (unsigned long)_etext;
62612+
62613+#ifdef CONFIG_X86_64
62614+ /* check against linear mapping as well */
62615+ if (high > (unsigned long)__va(__pa(textlow)) &&
62616+ low < (unsigned long)__va(__pa(texthigh)))
62617+ return true;
62618+#endif
62619+
62620+#endif
62621+
62622+ if (high <= textlow || low >= texthigh)
62623+ return false;
62624+ else
62625+ return true;
62626+}
62627+#endif
62628+
62629+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
62630+{
62631+#ifdef CONFIG_PAX_USERCOPY
62632+ const char *type;
62633+#endif
62634+
62635+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
62636+ unsigned long stackstart = (unsigned long)task_stack_page(current);
62637+ unsigned long currentsp = (unsigned long)&stackstart;
62638+ if (unlikely((currentsp < stackstart + 512 ||
62639+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
62640+ BUG();
62641+#endif
62642+
62643+#ifndef CONFIG_PAX_USERCOPY_DEBUG
62644+ if (const_size)
62645+ return;
62646+#endif
62647+
62648+#ifdef CONFIG_PAX_USERCOPY
62649+ if (!n)
62650+ return;
62651+
62652+ type = check_heap_object(ptr, n);
62653+ if (!type) {
62654+ int ret = check_stack_object(ptr, n);
62655+ if (ret == 1 || ret == 2)
62656+ return;
62657+ if (ret == 0) {
62658+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
62659+ type = "<kernel text>";
62660+ else
62661+ return;
62662+ } else
62663+ type = "<process stack>";
62664+ }
62665+
62666+ pax_report_usercopy(ptr, n, to_user, type);
62667+#endif
62668+
62669+}
62670+EXPORT_SYMBOL(__check_object_size);
62671+
62672+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62673+void pax_track_stack(void)
62674+{
62675+ unsigned long sp = (unsigned long)&sp;
62676+ if (sp < current_thread_info()->lowest_stack &&
62677+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
62678+ current_thread_info()->lowest_stack = sp;
62679+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
62680+ BUG();
62681+}
62682+EXPORT_SYMBOL(pax_track_stack);
62683+#endif
62684+
62685+#ifdef CONFIG_PAX_SIZE_OVERFLOW
62686+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
62687+{
62688+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
62689+ dump_stack();
62690+ do_group_exit(SIGKILL);
62691+}
62692+EXPORT_SYMBOL(report_size_overflow);
62693+#endif
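
The __check_object_size() routine added above is the single funnel through which PAX_USERCOPY vets every non-constant-size copy between kernel and user space. For orientation, here is a minimal C restatement of its decision flow, reusing the helpers defined above; the _sketch name is illustrative and the stack-sanity and debug paths are dropped:

/*
 * Sketch of the PAX_USERCOPY verdict logic (restated from
 * __check_object_size() above; not new functionality).
 */
static void check_object_sketch(const void *ptr, unsigned long n, bool to_user)
{
	/* heap check knows slab bounds and whitelisted object types */
	const char *type = check_heap_object(ptr, n);

	if (!type) {
		switch (check_stack_object(ptr, n)) {
		case 1:
		case 2:		/* object lies sanely within the task stack */
			return;
		case 0:		/* not on the stack: forbid kernel text */
			if (!check_kernel_text_object((unsigned long)ptr,
						      (unsigned long)ptr + n))
				return;
			type = "<kernel text>";
			break;
		default:	/* -1: object straddles stack frames */
			type = "<process stack>";
		}
	}
	/* logs the attempt and SIGKILLs the offending task group */
	pax_report_usercopy(ptr, n, to_user, type);
}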
62694diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
62695index 9f9992b..8b59411 100644
62696--- a/fs/ext2/balloc.c
62697+++ b/fs/ext2/balloc.c
62698@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
62699
62700 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62701 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62702- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62703+ if (free_blocks < root_blocks + 1 &&
62704 !uid_eq(sbi->s_resuid, current_fsuid()) &&
62705 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62706- !in_group_p (sbi->s_resgid))) {
62707+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62708 return 0;
62709 }
62710 return 1;
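
This reserved-blocks hunk, repeated for ext3 and ext4 below, does two things: it swaps capable() for grsecurity's capable_nolog(), and it moves that test to the end of the && chain. The capability framework is therefore consulted only after every cheap uid/gid escape hatch has failed, so routine allocations near the reservation threshold neither spam the audit log nor register a CAP_SYS_RESOURCE use. A condensed sketch of the resulting predicate (hypothetical helper name, ext2 field names from the hunk):

/* Returns nonzero when an allocation below the reserve must be denied. */
static int deny_reserved_blocks_sketch(struct ext2_sb_info *sbi,
				       ext2_fsblk_t free_blocks,
				       ext2_fsblk_t root_blocks)
{
	return free_blocks < root_blocks + 1 &&
	       !uid_eq(sbi->s_resuid, current_fsuid()) &&
	       (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
		!in_group_p(sbi->s_resgid)) &&
	       !capable_nolog(CAP_SYS_RESOURCE);	/* last, unlogged */
}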
62711diff --git a/fs/ext2/super.c b/fs/ext2/super.c
62712index d0e746e..82e06f0 100644
62713--- a/fs/ext2/super.c
62714+++ b/fs/ext2/super.c
62715@@ -267,10 +267,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
62716 #ifdef CONFIG_EXT2_FS_XATTR
62717 if (test_opt(sb, XATTR_USER))
62718 seq_puts(seq, ",user_xattr");
62719- if (!test_opt(sb, XATTR_USER) &&
62720- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
62721+ if (!test_opt(sb, XATTR_USER))
62722 seq_puts(seq, ",nouser_xattr");
62723- }
62724 #endif
62725
62726 #ifdef CONFIG_EXT2_FS_POSIX_ACL
62727@@ -856,8 +854,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
62728 if (def_mount_opts & EXT2_DEFM_UID16)
62729 set_opt(sbi->s_mount_opt, NO_UID32);
62730 #ifdef CONFIG_EXT2_FS_XATTR
62731- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
62732- set_opt(sbi->s_mount_opt, XATTR_USER);
62733+ /* always enable user xattrs */
62734+ set_opt(sbi->s_mount_opt, XATTR_USER);
62735 #endif
62736 #ifdef CONFIG_EXT2_FS_POSIX_ACL
62737 if (def_mount_opts & EXT2_DEFM_ACL)
62738diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
62739index 9142614..97484fa 100644
62740--- a/fs/ext2/xattr.c
62741+++ b/fs/ext2/xattr.c
62742@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
62743 struct buffer_head *bh = NULL;
62744 struct ext2_xattr_entry *entry;
62745 char *end;
62746- size_t rest = buffer_size;
62747+ size_t rest = buffer_size, total_size = 0;
62748 int error;
62749
62750 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
62751@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
62752 buffer += size;
62753 }
62754 rest -= size;
62755+ total_size += size;
62756 }
62757 }
62758- error = buffer_size - rest; /* total size */
62759+ error = total_size;
62760
62761 cleanup:
62762 brelse(bh);
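
The listing-size hunk here (and its ext3/ext4 twins below) returns an explicitly accumulated total_size instead of buffer_size - rest. Both expressions agree, but in the size-probe case (zero-length buffer) rest deliberately wraps below zero, and deriving the result from that wrap is exactly the intentional-overflow pattern the PAX_SIZE_OVERFLOW plugin instruments; the accumulator keeps the returned value wrap-free. That rationale is inferred, the patch itself does not state it. A toy model:

/* Toy model of the return-value change.  With a zero-sized probe
 * buffer, 'rest' underflows by design, so 'buffer_size - rest' relies
 * on unsigned wraparound; the accumulator reports the same number
 * without wrapping.  (Rationale inferred, not stated in the patch.) */
static size_t listing_size_sketch(const size_t *sizes, int n)
{
	size_t total_size = 0;
	int i;

	for (i = 0; i < n; i++)
		total_size += sizes[i];
	return total_size;		/* was: buffer_size - rest */
}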
62763diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
62764index 158b5d4..2432610 100644
62765--- a/fs/ext3/balloc.c
62766+++ b/fs/ext3/balloc.c
62767@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
62768
62769 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62770 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62771- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62772+ if (free_blocks < root_blocks + 1 &&
62773 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
62774 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62775- !in_group_p (sbi->s_resgid))) {
62776+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62777 return 0;
62778 }
62779 return 1;
62780diff --git a/fs/ext3/super.c b/fs/ext3/super.c
62781index d4dbf3c..906a6fb 100644
62782--- a/fs/ext3/super.c
62783+++ b/fs/ext3/super.c
62784@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
62785 #ifdef CONFIG_EXT3_FS_XATTR
62786 if (test_opt(sb, XATTR_USER))
62787 seq_puts(seq, ",user_xattr");
62788- if (!test_opt(sb, XATTR_USER) &&
62789- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
62790+ if (!test_opt(sb, XATTR_USER))
62791 seq_puts(seq, ",nouser_xattr");
62792- }
62793 #endif
62794 #ifdef CONFIG_EXT3_FS_POSIX_ACL
62795 if (test_opt(sb, POSIX_ACL))
62796@@ -1760,8 +1758,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
62797 if (def_mount_opts & EXT3_DEFM_UID16)
62798 set_opt(sbi->s_mount_opt, NO_UID32);
62799 #ifdef CONFIG_EXT3_FS_XATTR
62800- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
62801- set_opt(sbi->s_mount_opt, XATTR_USER);
62802+ /* always enable user xattrs */
62803+ set_opt(sbi->s_mount_opt, XATTR_USER);
62804 #endif
62805 #ifdef CONFIG_EXT3_FS_POSIX_ACL
62806 if (def_mount_opts & EXT3_DEFM_ACL)
62807diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
62808index c6874be..f8a6ae8 100644
62809--- a/fs/ext3/xattr.c
62810+++ b/fs/ext3/xattr.c
62811@@ -330,7 +330,7 @@ static int
62812 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62813 char *buffer, size_t buffer_size)
62814 {
62815- size_t rest = buffer_size;
62816+ size_t rest = buffer_size, total_size = 0;
62817
62818 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
62819 const struct xattr_handler *handler =
62820@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62821 buffer += size;
62822 }
62823 rest -= size;
62824+ total_size += size;
62825 }
62826 }
62827- return buffer_size - rest;
62828+ return total_size;
62829 }
62830
62831 static int
62832diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
62833index 83a6f49..d4e4d03 100644
62834--- a/fs/ext4/balloc.c
62835+++ b/fs/ext4/balloc.c
62836@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
62837 /* Hm, nope. Are (enough) root reserved clusters available? */
62838 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
62839 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
62840- capable(CAP_SYS_RESOURCE) ||
62841- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
62842+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
62843+ capable_nolog(CAP_SYS_RESOURCE)) {
62844
62845 if (free_clusters >= (nclusters + dirty_clusters +
62846 resv_clusters))
62847diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
62848index f63c3d5..3c1a033 100644
62849--- a/fs/ext4/ext4.h
62850+++ b/fs/ext4/ext4.h
62851@@ -1287,19 +1287,19 @@ struct ext4_sb_info {
62852 unsigned long s_mb_last_start;
62853
62854 /* stats for buddy allocator */
62855- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
62856- atomic_t s_bal_success; /* we found long enough chunks */
62857- atomic_t s_bal_allocated; /* in blocks */
62858- atomic_t s_bal_ex_scanned; /* total extents scanned */
62859- atomic_t s_bal_goals; /* goal hits */
62860- atomic_t s_bal_breaks; /* too long searches */
62861- atomic_t s_bal_2orders; /* 2^order hits */
62862+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
62863+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
62864+ atomic_unchecked_t s_bal_allocated; /* in blocks */
62865+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
62866+ atomic_unchecked_t s_bal_goals; /* goal hits */
62867+ atomic_unchecked_t s_bal_breaks; /* too long searches */
62868+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
62869 spinlock_t s_bal_lock;
62870 unsigned long s_mb_buddies_generated;
62871 unsigned long long s_mb_generation_time;
62872- atomic_t s_mb_lost_chunks;
62873- atomic_t s_mb_preallocated;
62874- atomic_t s_mb_discarded;
62875+ atomic_unchecked_t s_mb_lost_chunks;
62876+ atomic_unchecked_t s_mb_preallocated;
62877+ atomic_unchecked_t s_mb_discarded;
62878 atomic_t s_lock_busy;
62879
62880 /* locality groups */
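
The converted s_bal_* and s_mb_* fields are allocator statistics only. Under PAX_REFCOUNT, ordinary atomic_t arithmetic traps on signed overflow to catch reference-count wraps, so counters that may wrap harmlessly are retyped to atomic_unchecked_t, whose operations omit the trap (note s_lock_busy is left as a checked atomic_t). A simplified x86 flavour of the unchecked type, an assumption standing in for the PAX_REFCOUNT arch headers rather than a quotation of them:

typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain locked increment: no overflow trap appended */
	asm volatile("lock incl %0" : "+m" (v->counter));
}

static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	asm volatile("lock addl %1,%0" : "+m" (v->counter) : "ir" (i));
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}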
62881diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
62882index 8d1e602..abf497b 100644
62883--- a/fs/ext4/mballoc.c
62884+++ b/fs/ext4/mballoc.c
62885@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
62886 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
62887
62888 if (EXT4_SB(sb)->s_mb_stats)
62889- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
62890+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
62891
62892 break;
62893 }
62894@@ -2211,7 +2211,7 @@ repeat:
62895 ac->ac_status = AC_STATUS_CONTINUE;
62896 ac->ac_flags |= EXT4_MB_HINT_FIRST;
62897 cr = 3;
62898- atomic_inc(&sbi->s_mb_lost_chunks);
62899+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
62900 goto repeat;
62901 }
62902 }
62903@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
62904 if (sbi->s_mb_stats) {
62905 ext4_msg(sb, KERN_INFO,
62906 "mballoc: %u blocks %u reqs (%u success)",
62907- atomic_read(&sbi->s_bal_allocated),
62908- atomic_read(&sbi->s_bal_reqs),
62909- atomic_read(&sbi->s_bal_success));
62910+ atomic_read_unchecked(&sbi->s_bal_allocated),
62911+ atomic_read_unchecked(&sbi->s_bal_reqs),
62912+ atomic_read_unchecked(&sbi->s_bal_success));
62913 ext4_msg(sb, KERN_INFO,
62914 "mballoc: %u extents scanned, %u goal hits, "
62915 "%u 2^N hits, %u breaks, %u lost",
62916- atomic_read(&sbi->s_bal_ex_scanned),
62917- atomic_read(&sbi->s_bal_goals),
62918- atomic_read(&sbi->s_bal_2orders),
62919- atomic_read(&sbi->s_bal_breaks),
62920- atomic_read(&sbi->s_mb_lost_chunks));
62921+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
62922+ atomic_read_unchecked(&sbi->s_bal_goals),
62923+ atomic_read_unchecked(&sbi->s_bal_2orders),
62924+ atomic_read_unchecked(&sbi->s_bal_breaks),
62925+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
62926 ext4_msg(sb, KERN_INFO,
62927 "mballoc: %lu generated and it took %Lu",
62928 sbi->s_mb_buddies_generated,
62929 sbi->s_mb_generation_time);
62930 ext4_msg(sb, KERN_INFO,
62931 "mballoc: %u preallocated, %u discarded",
62932- atomic_read(&sbi->s_mb_preallocated),
62933- atomic_read(&sbi->s_mb_discarded));
62934+ atomic_read_unchecked(&sbi->s_mb_preallocated),
62935+ atomic_read_unchecked(&sbi->s_mb_discarded));
62936 }
62937
62938 free_percpu(sbi->s_locality_groups);
62939@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
62940 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
62941
62942 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
62943- atomic_inc(&sbi->s_bal_reqs);
62944- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62945+ atomic_inc_unchecked(&sbi->s_bal_reqs);
62946+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62947 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
62948- atomic_inc(&sbi->s_bal_success);
62949- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
62950+ atomic_inc_unchecked(&sbi->s_bal_success);
62951+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
62952 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
62953 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
62954- atomic_inc(&sbi->s_bal_goals);
62955+ atomic_inc_unchecked(&sbi->s_bal_goals);
62956 if (ac->ac_found > sbi->s_mb_max_to_scan)
62957- atomic_inc(&sbi->s_bal_breaks);
62958+ atomic_inc_unchecked(&sbi->s_bal_breaks);
62959 }
62960
62961 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
62962@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62963 trace_ext4_mb_new_inode_pa(ac, pa);
62964
62965 ext4_mb_use_inode_pa(ac, pa);
62966- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62967+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62968
62969 ei = EXT4_I(ac->ac_inode);
62970 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62971@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62972 trace_ext4_mb_new_group_pa(ac, pa);
62973
62974 ext4_mb_use_group_pa(ac, pa);
62975- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62976+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62977
62978 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62979 lg = ac->ac_lg;
62980@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62981 * from the bitmap and continue.
62982 */
62983 }
62984- atomic_add(free, &sbi->s_mb_discarded);
62985+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62986
62987 return err;
62988 }
62989@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62990 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62991 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62992 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62993- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62994+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62995 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62996
62997 return 0;
62998diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62999index 8313ca3..8a37d08 100644
63000--- a/fs/ext4/mmp.c
63001+++ b/fs/ext4/mmp.c
63002@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
63003 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
63004 const char *function, unsigned int line, const char *msg)
63005 {
63006- __ext4_warning(sb, function, line, msg);
63007+ __ext4_warning(sb, function, line, "%s", msg);
63008 __ext4_warning(sb, function, line,
63009 "MMP failure info: last update time: %llu, last update "
63010 "node: %s, last update device: %s\n",
63011diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
63012index 8a8ec62..1b02de5 100644
63013--- a/fs/ext4/resize.c
63014+++ b/fs/ext4/resize.c
63015@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
63016
63017 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
63018 for (count2 = count; count > 0; count -= count2, block += count2) {
63019- ext4_fsblk_t start;
63020+ ext4_fsblk_t start, diff;
63021 struct buffer_head *bh;
63022 ext4_group_t group;
63023 int err;
63024@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
63025 start = ext4_group_first_block_no(sb, group);
63026 group -= flex_gd->groups[0].group;
63027
63028- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
63029- if (count2 > count)
63030- count2 = count;
63031-
63032 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
63033 BUG_ON(flex_gd->count > 1);
63034 continue;
63035@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
63036 err = ext4_journal_get_write_access(handle, bh);
63037 if (err)
63038 return err;
63039+
63040+ diff = block - start;
63041+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
63042+ if (count2 > count)
63043+ count2 = count;
63044+
63045 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
63046- block - start, count2);
63047- ext4_set_bits(bh->b_data, block - start, count2);
63048+ diff, count2);
63049+ ext4_set_bits(bh->b_data, diff, count2);
63050
63051 err = ext4_handle_dirty_metadata(handle, NULL, bh);
63052 if (unlikely(err))
63053diff --git a/fs/ext4/super.c b/fs/ext4/super.c
63054index e061e66..87bc092 100644
63055--- a/fs/ext4/super.c
63056+++ b/fs/ext4/super.c
63057@@ -1243,7 +1243,7 @@ static ext4_fsblk_t get_sb_block(void **data)
63058 }
63059
63060 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
63061-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63062+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
63063 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
63064
63065 #ifdef CONFIG_QUOTA
63066@@ -2443,7 +2443,7 @@ struct ext4_attr {
63067 int offset;
63068 int deprecated_val;
63069 } u;
63070-};
63071+} __do_const;
63072
63073 static int parse_strtoull(const char *buf,
63074 unsigned long long max, unsigned long long *value)
63075diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
63076index 1e09fc7..0400dd4 100644
63077--- a/fs/ext4/xattr.c
63078+++ b/fs/ext4/xattr.c
63079@@ -399,7 +399,7 @@ static int
63080 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63081 char *buffer, size_t buffer_size)
63082 {
63083- size_t rest = buffer_size;
63084+ size_t rest = buffer_size, total_size = 0;
63085
63086 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
63087 const struct xattr_handler *handler =
63088@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
63089 buffer += size;
63090 }
63091 rest -= size;
63092+ total_size += size;
63093 }
63094 }
63095- return buffer_size - rest;
63096+ return total_size;
63097 }
63098
63099 static int
63100diff --git a/fs/fcntl.c b/fs/fcntl.c
63101index ee85cd4..9dd0d20 100644
63102--- a/fs/fcntl.c
63103+++ b/fs/fcntl.c
63104@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
63105 int force)
63106 {
63107 security_file_set_fowner(filp);
63108+ if (gr_handle_chroot_fowner(pid, type))
63109+ return;
63110+ if (gr_check_protected_task_fowner(pid, type))
63111+ return;
63112 f_modown(filp, pid, type, force);
63113 }
63114 EXPORT_SYMBOL(__f_setown);
63115diff --git a/fs/fhandle.c b/fs/fhandle.c
63116index 999ff5c..2281df9 100644
63117--- a/fs/fhandle.c
63118+++ b/fs/fhandle.c
63119@@ -8,6 +8,7 @@
63120 #include <linux/fs_struct.h>
63121 #include <linux/fsnotify.h>
63122 #include <linux/personality.h>
63123+#include <linux/grsecurity.h>
63124 #include <asm/uaccess.h>
63125 #include "internal.h"
63126 #include "mount.h"
63127@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
63128 } else
63129 retval = 0;
63130 /* copy the mount id */
63131- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
63132- sizeof(*mnt_id)) ||
63133+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
63134 copy_to_user(ufh, handle,
63135 sizeof(struct file_handle) + handle_bytes))
63136 retval = -EFAULT;
63137@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
63138 * the directory. Ideally we would like CAP_DAC_SEARCH.
63139 * But we don't have that
63140 */
63141- if (!capable(CAP_DAC_READ_SEARCH)) {
63142+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
63143 retval = -EPERM;
63144 goto out_err;
63145 }
63146@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
63147 goto out_err;
63148 }
63149 /* copy the full handle */
63150- if (copy_from_user(handle, ufh,
63151- sizeof(struct file_handle) +
63152+ *handle = f_handle;
63153+ if (copy_from_user(&handle->f_handle,
63154+ &ufh->f_handle,
63155 f_handle.handle_bytes)) {
63156 retval = -EFAULT;
63157 goto out_handle;
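
The handle_to_path() change closes a classic double-fetch: f_handle, including its handle_bytes field, was already copied from userspace and bounds-checked earlier in the function, but the code then re-copied the whole struct file_handle, so a racing thread could enlarge handle_bytes between the check and the second fetch. The hunk reuses the vetted kernel copy as the header and fetches only the variable-length payload; restated with comments:

	/* 'f_handle' is the header copied in and validated earlier */
	*handle = f_handle;			/* vetted header, never re-read */
	if (copy_from_user(&handle->f_handle,	/* payload bytes only */
			   &ufh->f_handle,
			   f_handle.handle_bytes)) {
		retval = -EFAULT;
		goto out_handle;
	}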
63158diff --git a/fs/file.c b/fs/file.c
63159index ee738ea..f6c15629 100644
63160--- a/fs/file.c
63161+++ b/fs/file.c
63162@@ -16,6 +16,7 @@
63163 #include <linux/slab.h>
63164 #include <linux/vmalloc.h>
63165 #include <linux/file.h>
63166+#include <linux/security.h>
63167 #include <linux/fdtable.h>
63168 #include <linux/bitops.h>
63169 #include <linux/interrupt.h>
63170@@ -139,7 +140,7 @@ out:
63171 * Return <0 error code on error; 1 on successful completion.
63172 * The files->file_lock should be held on entry, and will be held on exit.
63173 */
63174-static int expand_fdtable(struct files_struct *files, int nr)
63175+static int expand_fdtable(struct files_struct *files, unsigned int nr)
63176 __releases(files->file_lock)
63177 __acquires(files->file_lock)
63178 {
63179@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
63180 * expanded and execution may have blocked.
63181 * The files->file_lock should be held on entry, and will be held on exit.
63182 */
63183-static int expand_files(struct files_struct *files, int nr)
63184+static int expand_files(struct files_struct *files, unsigned int nr)
63185 {
63186 struct fdtable *fdt;
63187
63188@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
63189 if (!file)
63190 return __close_fd(files, fd);
63191
63192+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
63193 if (fd >= rlimit(RLIMIT_NOFILE))
63194 return -EBADF;
63195
63196@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
63197 if (unlikely(oldfd == newfd))
63198 return -EINVAL;
63199
63200+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
63201 if (newfd >= rlimit(RLIMIT_NOFILE))
63202 return -EBADF;
63203
63204@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
63205 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
63206 {
63207 int err;
63208+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
63209 if (from >= rlimit(RLIMIT_NOFILE))
63210 return -EINVAL;
63211 err = alloc_fd(from, flags);
63212diff --git a/fs/filesystems.c b/fs/filesystems.c
63213index 5797d45..7d7d79a 100644
63214--- a/fs/filesystems.c
63215+++ b/fs/filesystems.c
63216@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
63217 int len = dot ? dot - name : strlen(name);
63218
63219 fs = __get_fs_type(name, len);
63220+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63221+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
63222+#else
63223 if (!fs && (request_module("fs-%.*s", len, name) == 0))
63224+#endif
63225 fs = __get_fs_type(name, len);
63226
63227 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
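
get_fs_type() is one of several places where an unprivileged string (e.g. mount -t foo) can trigger a module auto-load. Under GRKERNSEC_MODHARDEN the load is routed through ___request_module() with the "grsec_modharden_fs" tag, which lets the module-hardening policy identify and restrict these indirect, user-triggered loads. The gate itself lives elsewhere in grsecurity; the sketch below is a hypothetical illustration of its shape, not the actual implementation:

/* Illustrative only: deny user-triggered fs-* auto-loading to
 * unprivileged callers (assumed policy, name hypothetical). */
static int modharden_fs_autoload_allowed_sketch(void)
{
	if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
		return 1;
	if (capable(CAP_SYS_MODULE))
		return 1;
	return 0;	/* non-root may not pull in new fs modules */
}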
63228diff --git a/fs/fs_struct.c b/fs/fs_struct.c
63229index 7dca743..1ff87ae 100644
63230--- a/fs/fs_struct.c
63231+++ b/fs/fs_struct.c
63232@@ -4,6 +4,7 @@
63233 #include <linux/path.h>
63234 #include <linux/slab.h>
63235 #include <linux/fs_struct.h>
63236+#include <linux/grsecurity.h>
63237 #include "internal.h"
63238
63239 /*
63240@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
63241 struct path old_root;
63242
63243 path_get(path);
63244+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
63245 spin_lock(&fs->lock);
63246 write_seqcount_begin(&fs->seq);
63247 old_root = fs->root;
63248 fs->root = *path;
63249+ gr_set_chroot_entries(current, path);
63250 write_seqcount_end(&fs->seq);
63251 spin_unlock(&fs->lock);
63252- if (old_root.dentry)
63253+ if (old_root.dentry) {
63254+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
63255 path_put(&old_root);
63256+ }
63257 }
63258
63259 /*
63260@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
63261 int hits = 0;
63262 spin_lock(&fs->lock);
63263 write_seqcount_begin(&fs->seq);
63264+ /* this root replacement is only done by pivot_root,
63265+ leave grsec's chroot tagging alone for this task
63266+ so that a pivoted root isn't treated as a chroot
63267+ */
63268 hits += replace_path(&fs->root, old_root, new_root);
63269 hits += replace_path(&fs->pwd, old_root, new_root);
63270 write_seqcount_end(&fs->seq);
63271@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
63272
63273 void free_fs_struct(struct fs_struct *fs)
63274 {
63275+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
63276 path_put(&fs->root);
63277 path_put(&fs->pwd);
63278 kmem_cache_free(fs_cachep, fs);
63279@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
63280 task_lock(tsk);
63281 spin_lock(&fs->lock);
63282 tsk->fs = NULL;
63283- kill = !--fs->users;
63284+ gr_clear_chroot_entries(tsk);
63285+ kill = !atomic_dec_return(&fs->users);
63286 spin_unlock(&fs->lock);
63287 task_unlock(tsk);
63288 if (kill)
63289@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63290 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
63291 /* We don't need to lock fs - think why ;-) */
63292 if (fs) {
63293- fs->users = 1;
63294+ atomic_set(&fs->users, 1);
63295 fs->in_exec = 0;
63296 spin_lock_init(&fs->lock);
63297 seqcount_init(&fs->seq);
63298@@ -121,9 +132,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63299 spin_lock(&old->lock);
63300 fs->root = old->root;
63301 path_get(&fs->root);
63302+ /* instead of calling gr_set_chroot_entries here,
63303+ we call it from every caller of this function
63304+ */
63305 fs->pwd = old->pwd;
63306 path_get(&fs->pwd);
63307 spin_unlock(&old->lock);
63308+ gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
63309 }
63310 return fs;
63311 }
63312@@ -139,8 +154,9 @@ int unshare_fs_struct(void)
63313
63314 task_lock(current);
63315 spin_lock(&fs->lock);
63316- kill = !--fs->users;
63317+ kill = !atomic_dec_return(&fs->users);
63318 current->fs = new_fs;
63319+ gr_set_chroot_entries(current, &new_fs->root);
63320 spin_unlock(&fs->lock);
63321 task_unlock(current);
63322
63323@@ -153,13 +169,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
63324
63325 int current_umask(void)
63326 {
63327- return current->fs->umask;
63328+ return current->fs->umask | gr_acl_umask();
63329 }
63330 EXPORT_SYMBOL(current_umask);
63331
63332 /* to be mentioned only in INIT_TASK */
63333 struct fs_struct init_fs = {
63334- .users = 1,
63335+ .users = ATOMIC_INIT(1),
63336 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
63337 .seq = SEQCNT_ZERO(init_fs.seq),
63338 .umask = 0022,
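
Throughout fs_struct.c the plain int fs->users becomes an atomic_t (ATOMIC_INIT(1) for init_fs), with drops via atomic_dec_return() instead of !--fs->users. All the converted sites already hold fs->lock, so the likely motive, inferred rather than stated, is that grsecurity's chroot accounting reads the count from contexts that do not take the lock. The exit_fs() drop path after conversion, restated as a sketch:

static void exit_fs_sketch(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;
	int kill;

	task_lock(tsk);
	spin_lock(&fs->lock);
	tsk->fs = NULL;
	gr_clear_chroot_entries(tsk);		/* untag grsec chroot state */
	kill = !atomic_dec_return(&fs->users);	/* was: !--fs->users */
	spin_unlock(&fs->lock);
	task_unlock(tsk);
	if (kill)
		free_fs_struct(fs);	/* now also drops chroot refcnts */
}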
63339diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
63340index 89acec7..a575262 100644
63341--- a/fs/fscache/cookie.c
63342+++ b/fs/fscache/cookie.c
63343@@ -19,7 +19,7 @@
63344
63345 struct kmem_cache *fscache_cookie_jar;
63346
63347-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
63348+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
63349
63350 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
63351 static int fscache_alloc_object(struct fscache_cache *cache,
63352@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
63353 parent ? (char *) parent->def->name : "<no-parent>",
63354 def->name, netfs_data, enable);
63355
63356- fscache_stat(&fscache_n_acquires);
63357+ fscache_stat_unchecked(&fscache_n_acquires);
63358
63359 /* if there's no parent cookie, then we don't create one here either */
63360 if (!parent) {
63361- fscache_stat(&fscache_n_acquires_null);
63362+ fscache_stat_unchecked(&fscache_n_acquires_null);
63363 _leave(" [no parent]");
63364 return NULL;
63365 }
63366@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63367 /* allocate and initialise a cookie */
63368 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
63369 if (!cookie) {
63370- fscache_stat(&fscache_n_acquires_oom);
63371+ fscache_stat_unchecked(&fscache_n_acquires_oom);
63372 _leave(" [ENOMEM]");
63373 return NULL;
63374 }
63375@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
63376
63377 switch (cookie->def->type) {
63378 case FSCACHE_COOKIE_TYPE_INDEX:
63379- fscache_stat(&fscache_n_cookie_index);
63380+ fscache_stat_unchecked(&fscache_n_cookie_index);
63381 break;
63382 case FSCACHE_COOKIE_TYPE_DATAFILE:
63383- fscache_stat(&fscache_n_cookie_data);
63384+ fscache_stat_unchecked(&fscache_n_cookie_data);
63385 break;
63386 default:
63387- fscache_stat(&fscache_n_cookie_special);
63388+ fscache_stat_unchecked(&fscache_n_cookie_special);
63389 break;
63390 }
63391
63392@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63393 } else {
63394 atomic_dec(&parent->n_children);
63395 __fscache_cookie_put(cookie);
63396- fscache_stat(&fscache_n_acquires_nobufs);
63397+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
63398 _leave(" = NULL");
63399 return NULL;
63400 }
63401@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63402 }
63403 }
63404
63405- fscache_stat(&fscache_n_acquires_ok);
63406+ fscache_stat_unchecked(&fscache_n_acquires_ok);
63407 _leave(" = %p", cookie);
63408 return cookie;
63409 }
63410@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
63411 cache = fscache_select_cache_for_object(cookie->parent);
63412 if (!cache) {
63413 up_read(&fscache_addremove_sem);
63414- fscache_stat(&fscache_n_acquires_no_cache);
63415+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
63416 _leave(" = -ENOMEDIUM [no cache]");
63417 return -ENOMEDIUM;
63418 }
63419@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
63420 object = cache->ops->alloc_object(cache, cookie);
63421 fscache_stat_d(&fscache_n_cop_alloc_object);
63422 if (IS_ERR(object)) {
63423- fscache_stat(&fscache_n_object_no_alloc);
63424+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
63425 ret = PTR_ERR(object);
63426 goto error;
63427 }
63428
63429- fscache_stat(&fscache_n_object_alloc);
63430+ fscache_stat_unchecked(&fscache_n_object_alloc);
63431
63432- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
63433+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
63434
63435 _debug("ALLOC OBJ%x: %s {%lx}",
63436 object->debug_id, cookie->def->name, object->events);
63437@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
63438
63439 _enter("{%s}", cookie->def->name);
63440
63441- fscache_stat(&fscache_n_invalidates);
63442+ fscache_stat_unchecked(&fscache_n_invalidates);
63443
63444 /* Only permit invalidation of data files. Invalidating an index will
63445 * require the caller to release all its attachments to the tree rooted
63446@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
63447 {
63448 struct fscache_object *object;
63449
63450- fscache_stat(&fscache_n_updates);
63451+ fscache_stat_unchecked(&fscache_n_updates);
63452
63453 if (!cookie) {
63454- fscache_stat(&fscache_n_updates_null);
63455+ fscache_stat_unchecked(&fscache_n_updates_null);
63456 _leave(" [no cookie]");
63457 return;
63458 }
63459@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
63460 */
63461 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
63462 {
63463- fscache_stat(&fscache_n_relinquishes);
63464+ fscache_stat_unchecked(&fscache_n_relinquishes);
63465 if (retire)
63466- fscache_stat(&fscache_n_relinquishes_retire);
63467+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
63468
63469 if (!cookie) {
63470- fscache_stat(&fscache_n_relinquishes_null);
63471+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
63472 _leave(" [no cookie]");
63473 return;
63474 }
63475@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
63476 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
63477 goto inconsistent;
63478
63479- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63480+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63481
63482 __fscache_use_cookie(cookie);
63483 if (fscache_submit_op(object, op) < 0)
63484diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
63485index 7872a62..d91b19f 100644
63486--- a/fs/fscache/internal.h
63487+++ b/fs/fscache/internal.h
63488@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
63489 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
63490 extern int fscache_wait_for_operation_activation(struct fscache_object *,
63491 struct fscache_operation *,
63492- atomic_t *,
63493- atomic_t *,
63494+ atomic_unchecked_t *,
63495+ atomic_unchecked_t *,
63496 void (*)(struct fscache_operation *));
63497 extern void fscache_invalidate_writes(struct fscache_cookie *);
63498
63499@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
63500 * stats.c
63501 */
63502 #ifdef CONFIG_FSCACHE_STATS
63503-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63504-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63505+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63506+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63507
63508-extern atomic_t fscache_n_op_pend;
63509-extern atomic_t fscache_n_op_run;
63510-extern atomic_t fscache_n_op_enqueue;
63511-extern atomic_t fscache_n_op_deferred_release;
63512-extern atomic_t fscache_n_op_release;
63513-extern atomic_t fscache_n_op_gc;
63514-extern atomic_t fscache_n_op_cancelled;
63515-extern atomic_t fscache_n_op_rejected;
63516+extern atomic_unchecked_t fscache_n_op_pend;
63517+extern atomic_unchecked_t fscache_n_op_run;
63518+extern atomic_unchecked_t fscache_n_op_enqueue;
63519+extern atomic_unchecked_t fscache_n_op_deferred_release;
63520+extern atomic_unchecked_t fscache_n_op_release;
63521+extern atomic_unchecked_t fscache_n_op_gc;
63522+extern atomic_unchecked_t fscache_n_op_cancelled;
63523+extern atomic_unchecked_t fscache_n_op_rejected;
63524
63525-extern atomic_t fscache_n_attr_changed;
63526-extern atomic_t fscache_n_attr_changed_ok;
63527-extern atomic_t fscache_n_attr_changed_nobufs;
63528-extern atomic_t fscache_n_attr_changed_nomem;
63529-extern atomic_t fscache_n_attr_changed_calls;
63530+extern atomic_unchecked_t fscache_n_attr_changed;
63531+extern atomic_unchecked_t fscache_n_attr_changed_ok;
63532+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
63533+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
63534+extern atomic_unchecked_t fscache_n_attr_changed_calls;
63535
63536-extern atomic_t fscache_n_allocs;
63537-extern atomic_t fscache_n_allocs_ok;
63538-extern atomic_t fscache_n_allocs_wait;
63539-extern atomic_t fscache_n_allocs_nobufs;
63540-extern atomic_t fscache_n_allocs_intr;
63541-extern atomic_t fscache_n_allocs_object_dead;
63542-extern atomic_t fscache_n_alloc_ops;
63543-extern atomic_t fscache_n_alloc_op_waits;
63544+extern atomic_unchecked_t fscache_n_allocs;
63545+extern atomic_unchecked_t fscache_n_allocs_ok;
63546+extern atomic_unchecked_t fscache_n_allocs_wait;
63547+extern atomic_unchecked_t fscache_n_allocs_nobufs;
63548+extern atomic_unchecked_t fscache_n_allocs_intr;
63549+extern atomic_unchecked_t fscache_n_allocs_object_dead;
63550+extern atomic_unchecked_t fscache_n_alloc_ops;
63551+extern atomic_unchecked_t fscache_n_alloc_op_waits;
63552
63553-extern atomic_t fscache_n_retrievals;
63554-extern atomic_t fscache_n_retrievals_ok;
63555-extern atomic_t fscache_n_retrievals_wait;
63556-extern atomic_t fscache_n_retrievals_nodata;
63557-extern atomic_t fscache_n_retrievals_nobufs;
63558-extern atomic_t fscache_n_retrievals_intr;
63559-extern atomic_t fscache_n_retrievals_nomem;
63560-extern atomic_t fscache_n_retrievals_object_dead;
63561-extern atomic_t fscache_n_retrieval_ops;
63562-extern atomic_t fscache_n_retrieval_op_waits;
63563+extern atomic_unchecked_t fscache_n_retrievals;
63564+extern atomic_unchecked_t fscache_n_retrievals_ok;
63565+extern atomic_unchecked_t fscache_n_retrievals_wait;
63566+extern atomic_unchecked_t fscache_n_retrievals_nodata;
63567+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
63568+extern atomic_unchecked_t fscache_n_retrievals_intr;
63569+extern atomic_unchecked_t fscache_n_retrievals_nomem;
63570+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
63571+extern atomic_unchecked_t fscache_n_retrieval_ops;
63572+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
63573
63574-extern atomic_t fscache_n_stores;
63575-extern atomic_t fscache_n_stores_ok;
63576-extern atomic_t fscache_n_stores_again;
63577-extern atomic_t fscache_n_stores_nobufs;
63578-extern atomic_t fscache_n_stores_oom;
63579-extern atomic_t fscache_n_store_ops;
63580-extern atomic_t fscache_n_store_calls;
63581-extern atomic_t fscache_n_store_pages;
63582-extern atomic_t fscache_n_store_radix_deletes;
63583-extern atomic_t fscache_n_store_pages_over_limit;
63584+extern atomic_unchecked_t fscache_n_stores;
63585+extern atomic_unchecked_t fscache_n_stores_ok;
63586+extern atomic_unchecked_t fscache_n_stores_again;
63587+extern atomic_unchecked_t fscache_n_stores_nobufs;
63588+extern atomic_unchecked_t fscache_n_stores_oom;
63589+extern atomic_unchecked_t fscache_n_store_ops;
63590+extern atomic_unchecked_t fscache_n_store_calls;
63591+extern atomic_unchecked_t fscache_n_store_pages;
63592+extern atomic_unchecked_t fscache_n_store_radix_deletes;
63593+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
63594
63595-extern atomic_t fscache_n_store_vmscan_not_storing;
63596-extern atomic_t fscache_n_store_vmscan_gone;
63597-extern atomic_t fscache_n_store_vmscan_busy;
63598-extern atomic_t fscache_n_store_vmscan_cancelled;
63599-extern atomic_t fscache_n_store_vmscan_wait;
63600+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63601+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
63602+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
63603+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63604+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
63605
63606-extern atomic_t fscache_n_marks;
63607-extern atomic_t fscache_n_uncaches;
63608+extern atomic_unchecked_t fscache_n_marks;
63609+extern atomic_unchecked_t fscache_n_uncaches;
63610
63611-extern atomic_t fscache_n_acquires;
63612-extern atomic_t fscache_n_acquires_null;
63613-extern atomic_t fscache_n_acquires_no_cache;
63614-extern atomic_t fscache_n_acquires_ok;
63615-extern atomic_t fscache_n_acquires_nobufs;
63616-extern atomic_t fscache_n_acquires_oom;
63617+extern atomic_unchecked_t fscache_n_acquires;
63618+extern atomic_unchecked_t fscache_n_acquires_null;
63619+extern atomic_unchecked_t fscache_n_acquires_no_cache;
63620+extern atomic_unchecked_t fscache_n_acquires_ok;
63621+extern atomic_unchecked_t fscache_n_acquires_nobufs;
63622+extern atomic_unchecked_t fscache_n_acquires_oom;
63623
63624-extern atomic_t fscache_n_invalidates;
63625-extern atomic_t fscache_n_invalidates_run;
63626+extern atomic_unchecked_t fscache_n_invalidates;
63627+extern atomic_unchecked_t fscache_n_invalidates_run;
63628
63629-extern atomic_t fscache_n_updates;
63630-extern atomic_t fscache_n_updates_null;
63631-extern atomic_t fscache_n_updates_run;
63632+extern atomic_unchecked_t fscache_n_updates;
63633+extern atomic_unchecked_t fscache_n_updates_null;
63634+extern atomic_unchecked_t fscache_n_updates_run;
63635
63636-extern atomic_t fscache_n_relinquishes;
63637-extern atomic_t fscache_n_relinquishes_null;
63638-extern atomic_t fscache_n_relinquishes_waitcrt;
63639-extern atomic_t fscache_n_relinquishes_retire;
63640+extern atomic_unchecked_t fscache_n_relinquishes;
63641+extern atomic_unchecked_t fscache_n_relinquishes_null;
63642+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63643+extern atomic_unchecked_t fscache_n_relinquishes_retire;
63644
63645-extern atomic_t fscache_n_cookie_index;
63646-extern atomic_t fscache_n_cookie_data;
63647-extern atomic_t fscache_n_cookie_special;
63648+extern atomic_unchecked_t fscache_n_cookie_index;
63649+extern atomic_unchecked_t fscache_n_cookie_data;
63650+extern atomic_unchecked_t fscache_n_cookie_special;
63651
63652-extern atomic_t fscache_n_object_alloc;
63653-extern atomic_t fscache_n_object_no_alloc;
63654-extern atomic_t fscache_n_object_lookups;
63655-extern atomic_t fscache_n_object_lookups_negative;
63656-extern atomic_t fscache_n_object_lookups_positive;
63657-extern atomic_t fscache_n_object_lookups_timed_out;
63658-extern atomic_t fscache_n_object_created;
63659-extern atomic_t fscache_n_object_avail;
63660-extern atomic_t fscache_n_object_dead;
63661+extern atomic_unchecked_t fscache_n_object_alloc;
63662+extern atomic_unchecked_t fscache_n_object_no_alloc;
63663+extern atomic_unchecked_t fscache_n_object_lookups;
63664+extern atomic_unchecked_t fscache_n_object_lookups_negative;
63665+extern atomic_unchecked_t fscache_n_object_lookups_positive;
63666+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
63667+extern atomic_unchecked_t fscache_n_object_created;
63668+extern atomic_unchecked_t fscache_n_object_avail;
63669+extern atomic_unchecked_t fscache_n_object_dead;
63670
63671-extern atomic_t fscache_n_checkaux_none;
63672-extern atomic_t fscache_n_checkaux_okay;
63673-extern atomic_t fscache_n_checkaux_update;
63674-extern atomic_t fscache_n_checkaux_obsolete;
63675+extern atomic_unchecked_t fscache_n_checkaux_none;
63676+extern atomic_unchecked_t fscache_n_checkaux_okay;
63677+extern atomic_unchecked_t fscache_n_checkaux_update;
63678+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
63679
63680 extern atomic_t fscache_n_cop_alloc_object;
63681 extern atomic_t fscache_n_cop_lookup_object;
63682@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
63683 atomic_inc(stat);
63684 }
63685
63686+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
63687+{
63688+ atomic_inc_unchecked(stat);
63689+}
63690+
63691 static inline void fscache_stat_d(atomic_t *stat)
63692 {
63693 atomic_dec(stat);
63694@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
63695
63696 #define __fscache_stat(stat) (NULL)
63697 #define fscache_stat(stat) do {} while (0)
63698+#define fscache_stat_unchecked(stat) do {} while (0)
63699 #define fscache_stat_d(stat) do {} while (0)
63700 #endif
63701
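
Two details in this internal.h hunk keep the tree building in every configuration: fscache_stat_unchecked() mirrors fscache_stat() for the retyped counters, and the !CONFIG_FSCACHE_STATS branch gains a matching no-op macro, since the _unchecked call sites sprinkled through the following files exist regardless of the stats option. The fscache_n_cop_* counters, which are balanced by fscache_stat_d() decrements, stay as checked atomic_t. The pattern, condensed:

#ifdef CONFIG_FSCACHE_STATS
static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
{
	atomic_inc_unchecked(stat);	/* wrap-tolerant statistic */
}
#else
/* stats compiled out: call sites must still compile away cleanly */
#define fscache_stat_unchecked(stat) do {} while (0)
#endif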
63702diff --git a/fs/fscache/object.c b/fs/fscache/object.c
63703index da032da..0076ce7 100644
63704--- a/fs/fscache/object.c
63705+++ b/fs/fscache/object.c
63706@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63707 _debug("LOOKUP \"%s\" in \"%s\"",
63708 cookie->def->name, object->cache->tag->name);
63709
63710- fscache_stat(&fscache_n_object_lookups);
63711+ fscache_stat_unchecked(&fscache_n_object_lookups);
63712 fscache_stat(&fscache_n_cop_lookup_object);
63713 ret = object->cache->ops->lookup_object(object);
63714 fscache_stat_d(&fscache_n_cop_lookup_object);
63715@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63716 if (ret == -ETIMEDOUT) {
63717 /* probably stuck behind another object, so move this one to
63718 * the back of the queue */
63719- fscache_stat(&fscache_n_object_lookups_timed_out);
63720+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
63721 _leave(" [timeout]");
63722 return NO_TRANSIT;
63723 }
63724@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
63725 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
63726
63727 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63728- fscache_stat(&fscache_n_object_lookups_negative);
63729+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
63730
63731 /* Allow write requests to begin stacking up and read requests to begin
63732 * returning ENODATA.
63733@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
63734 /* if we were still looking up, then we must have a positive lookup
63735 * result, in which case there may be data available */
63736 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63737- fscache_stat(&fscache_n_object_lookups_positive);
63738+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
63739
63740 /* We do (presumably) have data */
63741 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
63742@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
63743 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
63744 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
63745 } else {
63746- fscache_stat(&fscache_n_object_created);
63747+ fscache_stat_unchecked(&fscache_n_object_created);
63748 }
63749
63750 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
63751@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
63752 fscache_stat_d(&fscache_n_cop_lookup_complete);
63753
63754 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
63755- fscache_stat(&fscache_n_object_avail);
63756+ fscache_stat_unchecked(&fscache_n_object_avail);
63757
63758 _leave("");
63759 return transit_to(JUMPSTART_DEPS);
63760@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
63761
63762 /* this just shifts the object release to the work processor */
63763 fscache_put_object(object);
63764- fscache_stat(&fscache_n_object_dead);
63765+ fscache_stat_unchecked(&fscache_n_object_dead);
63766
63767 _leave("");
63768 return transit_to(OBJECT_DEAD);
63769@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63770 enum fscache_checkaux result;
63771
63772 if (!object->cookie->def->check_aux) {
63773- fscache_stat(&fscache_n_checkaux_none);
63774+ fscache_stat_unchecked(&fscache_n_checkaux_none);
63775 return FSCACHE_CHECKAUX_OKAY;
63776 }
63777
63778@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63779 switch (result) {
63780 /* entry okay as is */
63781 case FSCACHE_CHECKAUX_OKAY:
63782- fscache_stat(&fscache_n_checkaux_okay);
63783+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
63784 break;
63785
63786 /* entry requires update */
63787 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
63788- fscache_stat(&fscache_n_checkaux_update);
63789+ fscache_stat_unchecked(&fscache_n_checkaux_update);
63790 break;
63791
63792 /* entry requires deletion */
63793 case FSCACHE_CHECKAUX_OBSOLETE:
63794- fscache_stat(&fscache_n_checkaux_obsolete);
63795+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
63796 break;
63797
63798 default:
63799@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
63800 {
63801 const struct fscache_state *s;
63802
63803- fscache_stat(&fscache_n_invalidates_run);
63804+ fscache_stat_unchecked(&fscache_n_invalidates_run);
63805 fscache_stat(&fscache_n_cop_invalidate_object);
63806 s = _fscache_invalidate_object(object, event);
63807 fscache_stat_d(&fscache_n_cop_invalidate_object);
63808@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
63809 {
63810 _enter("{OBJ%x},%d", object->debug_id, event);
63811
63812- fscache_stat(&fscache_n_updates_run);
63813+ fscache_stat_unchecked(&fscache_n_updates_run);
63814 fscache_stat(&fscache_n_cop_update_object);
63815 object->cache->ops->update_object(object);
63816 fscache_stat_d(&fscache_n_cop_update_object);
63817diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
63818index e7b87a0..a85d47a 100644
63819--- a/fs/fscache/operation.c
63820+++ b/fs/fscache/operation.c
63821@@ -17,7 +17,7 @@
63822 #include <linux/slab.h>
63823 #include "internal.h"
63824
63825-atomic_t fscache_op_debug_id;
63826+atomic_unchecked_t fscache_op_debug_id;
63827 EXPORT_SYMBOL(fscache_op_debug_id);
63828
63829 /**
63830@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
63831 ASSERTCMP(atomic_read(&op->usage), >, 0);
63832 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
63833
63834- fscache_stat(&fscache_n_op_enqueue);
63835+ fscache_stat_unchecked(&fscache_n_op_enqueue);
63836 switch (op->flags & FSCACHE_OP_TYPE) {
63837 case FSCACHE_OP_ASYNC:
63838 _debug("queue async");
63839@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
63840 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
63841 if (op->processor)
63842 fscache_enqueue_operation(op);
63843- fscache_stat(&fscache_n_op_run);
63844+ fscache_stat_unchecked(&fscache_n_op_run);
63845 }
63846
63847 /*
63848@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63849 if (object->n_in_progress > 0) {
63850 atomic_inc(&op->usage);
63851 list_add_tail(&op->pend_link, &object->pending_ops);
63852- fscache_stat(&fscache_n_op_pend);
63853+ fscache_stat_unchecked(&fscache_n_op_pend);
63854 } else if (!list_empty(&object->pending_ops)) {
63855 atomic_inc(&op->usage);
63856 list_add_tail(&op->pend_link, &object->pending_ops);
63857- fscache_stat(&fscache_n_op_pend);
63858+ fscache_stat_unchecked(&fscache_n_op_pend);
63859 fscache_start_operations(object);
63860 } else {
63861 ASSERTCMP(object->n_in_progress, ==, 0);
63862@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63863 object->n_exclusive++; /* reads and writes must wait */
63864 atomic_inc(&op->usage);
63865 list_add_tail(&op->pend_link, &object->pending_ops);
63866- fscache_stat(&fscache_n_op_pend);
63867+ fscache_stat_unchecked(&fscache_n_op_pend);
63868 ret = 0;
63869 } else {
63870 /* If we're in any other state, there must have been an I/O
63871@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
63872 if (object->n_exclusive > 0) {
63873 atomic_inc(&op->usage);
63874 list_add_tail(&op->pend_link, &object->pending_ops);
63875- fscache_stat(&fscache_n_op_pend);
63876+ fscache_stat_unchecked(&fscache_n_op_pend);
63877 } else if (!list_empty(&object->pending_ops)) {
63878 atomic_inc(&op->usage);
63879 list_add_tail(&op->pend_link, &object->pending_ops);
63880- fscache_stat(&fscache_n_op_pend);
63881+ fscache_stat_unchecked(&fscache_n_op_pend);
63882 fscache_start_operations(object);
63883 } else {
63884 ASSERTCMP(object->n_exclusive, ==, 0);
63885@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
63886 object->n_ops++;
63887 atomic_inc(&op->usage);
63888 list_add_tail(&op->pend_link, &object->pending_ops);
63889- fscache_stat(&fscache_n_op_pend);
63890+ fscache_stat_unchecked(&fscache_n_op_pend);
63891 ret = 0;
63892 } else if (fscache_object_is_dying(object)) {
63893- fscache_stat(&fscache_n_op_rejected);
63894+ fscache_stat_unchecked(&fscache_n_op_rejected);
63895 op->state = FSCACHE_OP_ST_CANCELLED;
63896 ret = -ENOBUFS;
63897 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
63898@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
63899 ret = -EBUSY;
63900 if (op->state == FSCACHE_OP_ST_PENDING) {
63901 ASSERT(!list_empty(&op->pend_link));
63902- fscache_stat(&fscache_n_op_cancelled);
63903+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63904 list_del_init(&op->pend_link);
63905 if (do_cancel)
63906 do_cancel(op);
63907@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
63908 while (!list_empty(&object->pending_ops)) {
63909 op = list_entry(object->pending_ops.next,
63910 struct fscache_operation, pend_link);
63911- fscache_stat(&fscache_n_op_cancelled);
63912+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63913 list_del_init(&op->pend_link);
63914
63915 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
63916@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
63917 op->state, ==, FSCACHE_OP_ST_CANCELLED);
63918 op->state = FSCACHE_OP_ST_DEAD;
63919
63920- fscache_stat(&fscache_n_op_release);
63921+ fscache_stat_unchecked(&fscache_n_op_release);
63922
63923 if (op->release) {
63924 op->release(op);
63925@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
63926 * lock, and defer it otherwise */
63927 if (!spin_trylock(&object->lock)) {
63928 _debug("defer put");
63929- fscache_stat(&fscache_n_op_deferred_release);
63930+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
63931
63932 cache = object->cache;
63933 spin_lock(&cache->op_gc_list_lock);
63934@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
63935
63936 _debug("GC DEFERRED REL OBJ%x OP%x",
63937 object->debug_id, op->debug_id);
63938- fscache_stat(&fscache_n_op_gc);
63939+ fscache_stat_unchecked(&fscache_n_op_gc);
63940
63941 ASSERTCMP(atomic_read(&op->usage), ==, 0);
63942 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
63943diff --git a/fs/fscache/page.c b/fs/fscache/page.c
63944index de33b3f..8be4d29 100644
63945--- a/fs/fscache/page.c
63946+++ b/fs/fscache/page.c
63947@@ -74,7 +74,7 @@ try_again:
63948 val = radix_tree_lookup(&cookie->stores, page->index);
63949 if (!val) {
63950 rcu_read_unlock();
63951- fscache_stat(&fscache_n_store_vmscan_not_storing);
63952+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
63953 __fscache_uncache_page(cookie, page);
63954 return true;
63955 }
63956@@ -104,11 +104,11 @@ try_again:
63957 spin_unlock(&cookie->stores_lock);
63958
63959 if (xpage) {
63960- fscache_stat(&fscache_n_store_vmscan_cancelled);
63961- fscache_stat(&fscache_n_store_radix_deletes);
63962+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
63963+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63964 ASSERTCMP(xpage, ==, page);
63965 } else {
63966- fscache_stat(&fscache_n_store_vmscan_gone);
63967+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
63968 }
63969
63970 wake_up_bit(&cookie->flags, 0);
63971@@ -123,11 +123,11 @@ page_busy:
63972 * sleeping on memory allocation, so we may need to impose a timeout
63973 * too. */
63974 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63975- fscache_stat(&fscache_n_store_vmscan_busy);
63976+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63977 return false;
63978 }
63979
63980- fscache_stat(&fscache_n_store_vmscan_wait);
63981+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63982 if (!release_page_wait_timeout(cookie, page))
63983 _debug("fscache writeout timeout page: %p{%lx}",
63984 page, page->index);
63985@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63986 FSCACHE_COOKIE_STORING_TAG);
63987 if (!radix_tree_tag_get(&cookie->stores, page->index,
63988 FSCACHE_COOKIE_PENDING_TAG)) {
63989- fscache_stat(&fscache_n_store_radix_deletes);
63990+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63991 xpage = radix_tree_delete(&cookie->stores, page->index);
63992 }
63993 spin_unlock(&cookie->stores_lock);
63994@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63995
63996 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63997
63998- fscache_stat(&fscache_n_attr_changed_calls);
63999+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
64000
64001 if (fscache_object_is_active(object)) {
64002 fscache_stat(&fscache_n_cop_attr_changed);
64003@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64004
64005 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64006
64007- fscache_stat(&fscache_n_attr_changed);
64008+ fscache_stat_unchecked(&fscache_n_attr_changed);
64009
64010 op = kzalloc(sizeof(*op), GFP_KERNEL);
64011 if (!op) {
64012- fscache_stat(&fscache_n_attr_changed_nomem);
64013+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
64014 _leave(" = -ENOMEM");
64015 return -ENOMEM;
64016 }
64017@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
64018 if (fscache_submit_exclusive_op(object, op) < 0)
64019 goto nobufs_dec;
64020 spin_unlock(&cookie->lock);
64021- fscache_stat(&fscache_n_attr_changed_ok);
64022+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
64023 fscache_put_operation(op);
64024 _leave(" = 0");
64025 return 0;
64026@@ -242,7 +242,7 @@ nobufs:
64027 kfree(op);
64028 if (wake_cookie)
64029 __fscache_wake_unused_cookie(cookie);
64030- fscache_stat(&fscache_n_attr_changed_nobufs);
64031+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
64032 _leave(" = %d", -ENOBUFS);
64033 return -ENOBUFS;
64034 }
64035@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
64036 /* allocate a retrieval operation and attempt to submit it */
64037 op = kzalloc(sizeof(*op), GFP_NOIO);
64038 if (!op) {
64039- fscache_stat(&fscache_n_retrievals_nomem);
64040+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64041 return NULL;
64042 }
64043
64044@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
64045 return 0;
64046 }
64047
64048- fscache_stat(&fscache_n_retrievals_wait);
64049+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
64050
64051 jif = jiffies;
64052 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
64053 TASK_INTERRUPTIBLE) != 0) {
64054- fscache_stat(&fscache_n_retrievals_intr);
64055+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64056 _leave(" = -ERESTARTSYS");
64057 return -ERESTARTSYS;
64058 }
64059@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
64060 */
64061 int fscache_wait_for_operation_activation(struct fscache_object *object,
64062 struct fscache_operation *op,
64063- atomic_t *stat_op_waits,
64064- atomic_t *stat_object_dead,
64065+ atomic_unchecked_t *stat_op_waits,
64066+ atomic_unchecked_t *stat_object_dead,
64067 void (*do_cancel)(struct fscache_operation *))
64068 {
64069 int ret;
64070@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64071
64072 _debug(">>> WT");
64073 if (stat_op_waits)
64074- fscache_stat(stat_op_waits);
64075+ fscache_stat_unchecked(stat_op_waits);
64076 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
64077 TASK_INTERRUPTIBLE) != 0) {
64078 ret = fscache_cancel_op(op, do_cancel);
64079@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
64080 check_if_dead:
64081 if (op->state == FSCACHE_OP_ST_CANCELLED) {
64082 if (stat_object_dead)
64083- fscache_stat(stat_object_dead);
64084+ fscache_stat_unchecked(stat_object_dead);
64085 _leave(" = -ENOBUFS [cancelled]");
64086 return -ENOBUFS;
64087 }
64088@@ -381,7 +381,7 @@ check_if_dead:
64089 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
64090 fscache_cancel_op(op, do_cancel);
64091 if (stat_object_dead)
64092- fscache_stat(stat_object_dead);
64093+ fscache_stat_unchecked(stat_object_dead);
64094 return -ENOBUFS;
64095 }
64096 return 0;
64097@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64098
64099 _enter("%p,%p,,,", cookie, page);
64100
64101- fscache_stat(&fscache_n_retrievals);
64102+ fscache_stat_unchecked(&fscache_n_retrievals);
64103
64104 if (hlist_empty(&cookie->backing_objects))
64105 goto nobufs;
64106@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64107 goto nobufs_unlock_dec;
64108 spin_unlock(&cookie->lock);
64109
64110- fscache_stat(&fscache_n_retrieval_ops);
64111+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64112
64113 /* pin the netfs read context in case we need to do the actual netfs
64114 * read because we've encountered a cache read failure */
64115@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
64116
64117 error:
64118 if (ret == -ENOMEM)
64119- fscache_stat(&fscache_n_retrievals_nomem);
64120+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64121 else if (ret == -ERESTARTSYS)
64122- fscache_stat(&fscache_n_retrievals_intr);
64123+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64124 else if (ret == -ENODATA)
64125- fscache_stat(&fscache_n_retrievals_nodata);
64126+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64127 else if (ret < 0)
64128- fscache_stat(&fscache_n_retrievals_nobufs);
64129+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64130 else
64131- fscache_stat(&fscache_n_retrievals_ok);
64132+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64133
64134 fscache_put_retrieval(op);
64135 _leave(" = %d", ret);
64136@@ -505,7 +505,7 @@ nobufs_unlock:
64137 __fscache_wake_unused_cookie(cookie);
64138 kfree(op);
64139 nobufs:
64140- fscache_stat(&fscache_n_retrievals_nobufs);
64141+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64142 _leave(" = -ENOBUFS");
64143 return -ENOBUFS;
64144 }
64145@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64146
64147 _enter("%p,,%d,,,", cookie, *nr_pages);
64148
64149- fscache_stat(&fscache_n_retrievals);
64150+ fscache_stat_unchecked(&fscache_n_retrievals);
64151
64152 if (hlist_empty(&cookie->backing_objects))
64153 goto nobufs;
64154@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64155 goto nobufs_unlock_dec;
64156 spin_unlock(&cookie->lock);
64157
64158- fscache_stat(&fscache_n_retrieval_ops);
64159+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
64160
64161 /* pin the netfs read context in case we need to do the actual netfs
64162 * read because we've encountered a cache read failure */
64163@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
64164
64165 error:
64166 if (ret == -ENOMEM)
64167- fscache_stat(&fscache_n_retrievals_nomem);
64168+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
64169 else if (ret == -ERESTARTSYS)
64170- fscache_stat(&fscache_n_retrievals_intr);
64171+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
64172 else if (ret == -ENODATA)
64173- fscache_stat(&fscache_n_retrievals_nodata);
64174+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
64175 else if (ret < 0)
64176- fscache_stat(&fscache_n_retrievals_nobufs);
64177+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64178 else
64179- fscache_stat(&fscache_n_retrievals_ok);
64180+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
64181
64182 fscache_put_retrieval(op);
64183 _leave(" = %d", ret);
64184@@ -636,7 +636,7 @@ nobufs_unlock:
64185 if (wake_cookie)
64186 __fscache_wake_unused_cookie(cookie);
64187 nobufs:
64188- fscache_stat(&fscache_n_retrievals_nobufs);
64189+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
64190 _leave(" = -ENOBUFS");
64191 return -ENOBUFS;
64192 }
64193@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64194
64195 _enter("%p,%p,,,", cookie, page);
64196
64197- fscache_stat(&fscache_n_allocs);
64198+ fscache_stat_unchecked(&fscache_n_allocs);
64199
64200 if (hlist_empty(&cookie->backing_objects))
64201 goto nobufs;
64202@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64203 goto nobufs_unlock_dec;
64204 spin_unlock(&cookie->lock);
64205
64206- fscache_stat(&fscache_n_alloc_ops);
64207+ fscache_stat_unchecked(&fscache_n_alloc_ops);
64208
64209 ret = fscache_wait_for_operation_activation(
64210 object, &op->op,
64211@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
64212
64213 error:
64214 if (ret == -ERESTARTSYS)
64215- fscache_stat(&fscache_n_allocs_intr);
64216+ fscache_stat_unchecked(&fscache_n_allocs_intr);
64217 else if (ret < 0)
64218- fscache_stat(&fscache_n_allocs_nobufs);
64219+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64220 else
64221- fscache_stat(&fscache_n_allocs_ok);
64222+ fscache_stat_unchecked(&fscache_n_allocs_ok);
64223
64224 fscache_put_retrieval(op);
64225 _leave(" = %d", ret);
64226@@ -730,7 +730,7 @@ nobufs_unlock:
64227 if (wake_cookie)
64228 __fscache_wake_unused_cookie(cookie);
64229 nobufs:
64230- fscache_stat(&fscache_n_allocs_nobufs);
64231+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
64232 _leave(" = -ENOBUFS");
64233 return -ENOBUFS;
64234 }
64235@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64236
64237 spin_lock(&cookie->stores_lock);
64238
64239- fscache_stat(&fscache_n_store_calls);
64240+ fscache_stat_unchecked(&fscache_n_store_calls);
64241
64242 /* find a page to store */
64243 page = NULL;
64244@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64245 page = results[0];
64246 _debug("gang %d [%lx]", n, page->index);
64247 if (page->index > op->store_limit) {
64248- fscache_stat(&fscache_n_store_pages_over_limit);
64249+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
64250 goto superseded;
64251 }
64252
64253@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
64254 spin_unlock(&cookie->stores_lock);
64255 spin_unlock(&object->lock);
64256
64257- fscache_stat(&fscache_n_store_pages);
64258+ fscache_stat_unchecked(&fscache_n_store_pages);
64259 fscache_stat(&fscache_n_cop_write_page);
64260 ret = object->cache->ops->write_page(op, page);
64261 fscache_stat_d(&fscache_n_cop_write_page);
64262@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64263 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64264 ASSERT(PageFsCache(page));
64265
64266- fscache_stat(&fscache_n_stores);
64267+ fscache_stat_unchecked(&fscache_n_stores);
64268
64269 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
64270 _leave(" = -ENOBUFS [invalidating]");
64271@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64272 spin_unlock(&cookie->stores_lock);
64273 spin_unlock(&object->lock);
64274
64275- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
64276+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
64277 op->store_limit = object->store_limit;
64278
64279 __fscache_use_cookie(cookie);
64280@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64281
64282 spin_unlock(&cookie->lock);
64283 radix_tree_preload_end();
64284- fscache_stat(&fscache_n_store_ops);
64285- fscache_stat(&fscache_n_stores_ok);
64286+ fscache_stat_unchecked(&fscache_n_store_ops);
64287+ fscache_stat_unchecked(&fscache_n_stores_ok);
64288
64289 /* the work queue now carries its own ref on the object */
64290 fscache_put_operation(&op->op);
64291@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64292 return 0;
64293
64294 already_queued:
64295- fscache_stat(&fscache_n_stores_again);
64296+ fscache_stat_unchecked(&fscache_n_stores_again);
64297 already_pending:
64298 spin_unlock(&cookie->stores_lock);
64299 spin_unlock(&object->lock);
64300 spin_unlock(&cookie->lock);
64301 radix_tree_preload_end();
64302 kfree(op);
64303- fscache_stat(&fscache_n_stores_ok);
64304+ fscache_stat_unchecked(&fscache_n_stores_ok);
64305 _leave(" = 0");
64306 return 0;
64307
64308@@ -1039,14 +1039,14 @@ nobufs:
64309 kfree(op);
64310 if (wake_cookie)
64311 __fscache_wake_unused_cookie(cookie);
64312- fscache_stat(&fscache_n_stores_nobufs);
64313+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
64314 _leave(" = -ENOBUFS");
64315 return -ENOBUFS;
64316
64317 nomem_free:
64318 kfree(op);
64319 nomem:
64320- fscache_stat(&fscache_n_stores_oom);
64321+ fscache_stat_unchecked(&fscache_n_stores_oom);
64322 _leave(" = -ENOMEM");
64323 return -ENOMEM;
64324 }
64325@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
64326 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64327 ASSERTCMP(page, !=, NULL);
64328
64329- fscache_stat(&fscache_n_uncaches);
64330+ fscache_stat_unchecked(&fscache_n_uncaches);
64331
64332 /* cache withdrawal may beat us to it */
64333 if (!PageFsCache(page))
64334@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
64335 struct fscache_cookie *cookie = op->op.object->cookie;
64336
64337 #ifdef CONFIG_FSCACHE_STATS
64338- atomic_inc(&fscache_n_marks);
64339+ atomic_inc_unchecked(&fscache_n_marks);
64340 #endif
64341
64342 _debug("- mark %p{%lx}", page, page->index);
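
The fscache conversions above follow the PaX REFCOUNT split: counters that exist purely for statistics move to atomic_unchecked_t and are bumped via fscache_stat_unchecked(), so the overflow instrumentation applied to ordinary atomic_t reference counts does not fire on a harmlessly wrapping event counter. A minimal user-space sketch of the distinction, assuming REFCOUNT-style semantics (checked increments abort on overflow, unchecked ones simply wrap); the type and function names here are illustrative stand-ins, not the kernel definitions:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-ins for atomic_t and atomic_unchecked_t; the distinct struct
 * types are the point: each counter can only use its own accessors */
typedef struct { atomic_int v; } atomic_checked_t;
typedef struct { atomic_int v; } atomic_unchecked_model_t;

static void checked_inc(atomic_checked_t *a)
{
	if (atomic_fetch_add(&a->v, 1) == INT_MAX) {
		/* a wrapped refcount is an exploitable bug: stop hard */
		fprintf(stderr, "refcount overflow\n");
		abort();
	}
}

static void unchecked_inc(atomic_unchecked_model_t *a)
{
	atomic_fetch_add(&a->v, 1);	/* statistics: wraparound is benign */
}
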
64343diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
64344index 40d13c7..ddf52b9 100644
64345--- a/fs/fscache/stats.c
64346+++ b/fs/fscache/stats.c
64347@@ -18,99 +18,99 @@
64348 /*
64349 * operation counters
64350 */
64351-atomic_t fscache_n_op_pend;
64352-atomic_t fscache_n_op_run;
64353-atomic_t fscache_n_op_enqueue;
64354-atomic_t fscache_n_op_requeue;
64355-atomic_t fscache_n_op_deferred_release;
64356-atomic_t fscache_n_op_release;
64357-atomic_t fscache_n_op_gc;
64358-atomic_t fscache_n_op_cancelled;
64359-atomic_t fscache_n_op_rejected;
64360+atomic_unchecked_t fscache_n_op_pend;
64361+atomic_unchecked_t fscache_n_op_run;
64362+atomic_unchecked_t fscache_n_op_enqueue;
64363+atomic_unchecked_t fscache_n_op_requeue;
64364+atomic_unchecked_t fscache_n_op_deferred_release;
64365+atomic_unchecked_t fscache_n_op_release;
64366+atomic_unchecked_t fscache_n_op_gc;
64367+atomic_unchecked_t fscache_n_op_cancelled;
64368+atomic_unchecked_t fscache_n_op_rejected;
64369
64370-atomic_t fscache_n_attr_changed;
64371-atomic_t fscache_n_attr_changed_ok;
64372-atomic_t fscache_n_attr_changed_nobufs;
64373-atomic_t fscache_n_attr_changed_nomem;
64374-atomic_t fscache_n_attr_changed_calls;
64375+atomic_unchecked_t fscache_n_attr_changed;
64376+atomic_unchecked_t fscache_n_attr_changed_ok;
64377+atomic_unchecked_t fscache_n_attr_changed_nobufs;
64378+atomic_unchecked_t fscache_n_attr_changed_nomem;
64379+atomic_unchecked_t fscache_n_attr_changed_calls;
64380
64381-atomic_t fscache_n_allocs;
64382-atomic_t fscache_n_allocs_ok;
64383-atomic_t fscache_n_allocs_wait;
64384-atomic_t fscache_n_allocs_nobufs;
64385-atomic_t fscache_n_allocs_intr;
64386-atomic_t fscache_n_allocs_object_dead;
64387-atomic_t fscache_n_alloc_ops;
64388-atomic_t fscache_n_alloc_op_waits;
64389+atomic_unchecked_t fscache_n_allocs;
64390+atomic_unchecked_t fscache_n_allocs_ok;
64391+atomic_unchecked_t fscache_n_allocs_wait;
64392+atomic_unchecked_t fscache_n_allocs_nobufs;
64393+atomic_unchecked_t fscache_n_allocs_intr;
64394+atomic_unchecked_t fscache_n_allocs_object_dead;
64395+atomic_unchecked_t fscache_n_alloc_ops;
64396+atomic_unchecked_t fscache_n_alloc_op_waits;
64397
64398-atomic_t fscache_n_retrievals;
64399-atomic_t fscache_n_retrievals_ok;
64400-atomic_t fscache_n_retrievals_wait;
64401-atomic_t fscache_n_retrievals_nodata;
64402-atomic_t fscache_n_retrievals_nobufs;
64403-atomic_t fscache_n_retrievals_intr;
64404-atomic_t fscache_n_retrievals_nomem;
64405-atomic_t fscache_n_retrievals_object_dead;
64406-atomic_t fscache_n_retrieval_ops;
64407-atomic_t fscache_n_retrieval_op_waits;
64408+atomic_unchecked_t fscache_n_retrievals;
64409+atomic_unchecked_t fscache_n_retrievals_ok;
64410+atomic_unchecked_t fscache_n_retrievals_wait;
64411+atomic_unchecked_t fscache_n_retrievals_nodata;
64412+atomic_unchecked_t fscache_n_retrievals_nobufs;
64413+atomic_unchecked_t fscache_n_retrievals_intr;
64414+atomic_unchecked_t fscache_n_retrievals_nomem;
64415+atomic_unchecked_t fscache_n_retrievals_object_dead;
64416+atomic_unchecked_t fscache_n_retrieval_ops;
64417+atomic_unchecked_t fscache_n_retrieval_op_waits;
64418
64419-atomic_t fscache_n_stores;
64420-atomic_t fscache_n_stores_ok;
64421-atomic_t fscache_n_stores_again;
64422-atomic_t fscache_n_stores_nobufs;
64423-atomic_t fscache_n_stores_oom;
64424-atomic_t fscache_n_store_ops;
64425-atomic_t fscache_n_store_calls;
64426-atomic_t fscache_n_store_pages;
64427-atomic_t fscache_n_store_radix_deletes;
64428-atomic_t fscache_n_store_pages_over_limit;
64429+atomic_unchecked_t fscache_n_stores;
64430+atomic_unchecked_t fscache_n_stores_ok;
64431+atomic_unchecked_t fscache_n_stores_again;
64432+atomic_unchecked_t fscache_n_stores_nobufs;
64433+atomic_unchecked_t fscache_n_stores_oom;
64434+atomic_unchecked_t fscache_n_store_ops;
64435+atomic_unchecked_t fscache_n_store_calls;
64436+atomic_unchecked_t fscache_n_store_pages;
64437+atomic_unchecked_t fscache_n_store_radix_deletes;
64438+atomic_unchecked_t fscache_n_store_pages_over_limit;
64439
64440-atomic_t fscache_n_store_vmscan_not_storing;
64441-atomic_t fscache_n_store_vmscan_gone;
64442-atomic_t fscache_n_store_vmscan_busy;
64443-atomic_t fscache_n_store_vmscan_cancelled;
64444-atomic_t fscache_n_store_vmscan_wait;
64445+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64446+atomic_unchecked_t fscache_n_store_vmscan_gone;
64447+atomic_unchecked_t fscache_n_store_vmscan_busy;
64448+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64449+atomic_unchecked_t fscache_n_store_vmscan_wait;
64450
64451-atomic_t fscache_n_marks;
64452-atomic_t fscache_n_uncaches;
64453+atomic_unchecked_t fscache_n_marks;
64454+atomic_unchecked_t fscache_n_uncaches;
64455
64456-atomic_t fscache_n_acquires;
64457-atomic_t fscache_n_acquires_null;
64458-atomic_t fscache_n_acquires_no_cache;
64459-atomic_t fscache_n_acquires_ok;
64460-atomic_t fscache_n_acquires_nobufs;
64461-atomic_t fscache_n_acquires_oom;
64462+atomic_unchecked_t fscache_n_acquires;
64463+atomic_unchecked_t fscache_n_acquires_null;
64464+atomic_unchecked_t fscache_n_acquires_no_cache;
64465+atomic_unchecked_t fscache_n_acquires_ok;
64466+atomic_unchecked_t fscache_n_acquires_nobufs;
64467+atomic_unchecked_t fscache_n_acquires_oom;
64468
64469-atomic_t fscache_n_invalidates;
64470-atomic_t fscache_n_invalidates_run;
64471+atomic_unchecked_t fscache_n_invalidates;
64472+atomic_unchecked_t fscache_n_invalidates_run;
64473
64474-atomic_t fscache_n_updates;
64475-atomic_t fscache_n_updates_null;
64476-atomic_t fscache_n_updates_run;
64477+atomic_unchecked_t fscache_n_updates;
64478+atomic_unchecked_t fscache_n_updates_null;
64479+atomic_unchecked_t fscache_n_updates_run;
64480
64481-atomic_t fscache_n_relinquishes;
64482-atomic_t fscache_n_relinquishes_null;
64483-atomic_t fscache_n_relinquishes_waitcrt;
64484-atomic_t fscache_n_relinquishes_retire;
64485+atomic_unchecked_t fscache_n_relinquishes;
64486+atomic_unchecked_t fscache_n_relinquishes_null;
64487+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64488+atomic_unchecked_t fscache_n_relinquishes_retire;
64489
64490-atomic_t fscache_n_cookie_index;
64491-atomic_t fscache_n_cookie_data;
64492-atomic_t fscache_n_cookie_special;
64493+atomic_unchecked_t fscache_n_cookie_index;
64494+atomic_unchecked_t fscache_n_cookie_data;
64495+atomic_unchecked_t fscache_n_cookie_special;
64496
64497-atomic_t fscache_n_object_alloc;
64498-atomic_t fscache_n_object_no_alloc;
64499-atomic_t fscache_n_object_lookups;
64500-atomic_t fscache_n_object_lookups_negative;
64501-atomic_t fscache_n_object_lookups_positive;
64502-atomic_t fscache_n_object_lookups_timed_out;
64503-atomic_t fscache_n_object_created;
64504-atomic_t fscache_n_object_avail;
64505-atomic_t fscache_n_object_dead;
64506+atomic_unchecked_t fscache_n_object_alloc;
64507+atomic_unchecked_t fscache_n_object_no_alloc;
64508+atomic_unchecked_t fscache_n_object_lookups;
64509+atomic_unchecked_t fscache_n_object_lookups_negative;
64510+atomic_unchecked_t fscache_n_object_lookups_positive;
64511+atomic_unchecked_t fscache_n_object_lookups_timed_out;
64512+atomic_unchecked_t fscache_n_object_created;
64513+atomic_unchecked_t fscache_n_object_avail;
64514+atomic_unchecked_t fscache_n_object_dead;
64515
64516-atomic_t fscache_n_checkaux_none;
64517-atomic_t fscache_n_checkaux_okay;
64518-atomic_t fscache_n_checkaux_update;
64519-atomic_t fscache_n_checkaux_obsolete;
64520+atomic_unchecked_t fscache_n_checkaux_none;
64521+atomic_unchecked_t fscache_n_checkaux_okay;
64522+atomic_unchecked_t fscache_n_checkaux_update;
64523+atomic_unchecked_t fscache_n_checkaux_obsolete;
64524
64525 atomic_t fscache_n_cop_alloc_object;
64526 atomic_t fscache_n_cop_lookup_object;
64527@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
64528 seq_puts(m, "FS-Cache statistics\n");
64529
64530 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
64531- atomic_read(&fscache_n_cookie_index),
64532- atomic_read(&fscache_n_cookie_data),
64533- atomic_read(&fscache_n_cookie_special));
64534+ atomic_read_unchecked(&fscache_n_cookie_index),
64535+ atomic_read_unchecked(&fscache_n_cookie_data),
64536+ atomic_read_unchecked(&fscache_n_cookie_special));
64537
64538 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
64539- atomic_read(&fscache_n_object_alloc),
64540- atomic_read(&fscache_n_object_no_alloc),
64541- atomic_read(&fscache_n_object_avail),
64542- atomic_read(&fscache_n_object_dead));
64543+ atomic_read_unchecked(&fscache_n_object_alloc),
64544+ atomic_read_unchecked(&fscache_n_object_no_alloc),
64545+ atomic_read_unchecked(&fscache_n_object_avail),
64546+ atomic_read_unchecked(&fscache_n_object_dead));
64547 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
64548- atomic_read(&fscache_n_checkaux_none),
64549- atomic_read(&fscache_n_checkaux_okay),
64550- atomic_read(&fscache_n_checkaux_update),
64551- atomic_read(&fscache_n_checkaux_obsolete));
64552+ atomic_read_unchecked(&fscache_n_checkaux_none),
64553+ atomic_read_unchecked(&fscache_n_checkaux_okay),
64554+ atomic_read_unchecked(&fscache_n_checkaux_update),
64555+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
64556
64557 seq_printf(m, "Pages : mrk=%u unc=%u\n",
64558- atomic_read(&fscache_n_marks),
64559- atomic_read(&fscache_n_uncaches));
64560+ atomic_read_unchecked(&fscache_n_marks),
64561+ atomic_read_unchecked(&fscache_n_uncaches));
64562
64563 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
64564 " oom=%u\n",
64565- atomic_read(&fscache_n_acquires),
64566- atomic_read(&fscache_n_acquires_null),
64567- atomic_read(&fscache_n_acquires_no_cache),
64568- atomic_read(&fscache_n_acquires_ok),
64569- atomic_read(&fscache_n_acquires_nobufs),
64570- atomic_read(&fscache_n_acquires_oom));
64571+ atomic_read_unchecked(&fscache_n_acquires),
64572+ atomic_read_unchecked(&fscache_n_acquires_null),
64573+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
64574+ atomic_read_unchecked(&fscache_n_acquires_ok),
64575+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
64576+ atomic_read_unchecked(&fscache_n_acquires_oom));
64577
64578 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
64579- atomic_read(&fscache_n_object_lookups),
64580- atomic_read(&fscache_n_object_lookups_negative),
64581- atomic_read(&fscache_n_object_lookups_positive),
64582- atomic_read(&fscache_n_object_created),
64583- atomic_read(&fscache_n_object_lookups_timed_out));
64584+ atomic_read_unchecked(&fscache_n_object_lookups),
64585+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
64586+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
64587+ atomic_read_unchecked(&fscache_n_object_created),
64588+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
64589
64590 seq_printf(m, "Invals : n=%u run=%u\n",
64591- atomic_read(&fscache_n_invalidates),
64592- atomic_read(&fscache_n_invalidates_run));
64593+ atomic_read_unchecked(&fscache_n_invalidates),
64594+ atomic_read_unchecked(&fscache_n_invalidates_run));
64595
64596 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
64597- atomic_read(&fscache_n_updates),
64598- atomic_read(&fscache_n_updates_null),
64599- atomic_read(&fscache_n_updates_run));
64600+ atomic_read_unchecked(&fscache_n_updates),
64601+ atomic_read_unchecked(&fscache_n_updates_null),
64602+ atomic_read_unchecked(&fscache_n_updates_run));
64603
64604 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
64605- atomic_read(&fscache_n_relinquishes),
64606- atomic_read(&fscache_n_relinquishes_null),
64607- atomic_read(&fscache_n_relinquishes_waitcrt),
64608- atomic_read(&fscache_n_relinquishes_retire));
64609+ atomic_read_unchecked(&fscache_n_relinquishes),
64610+ atomic_read_unchecked(&fscache_n_relinquishes_null),
64611+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
64612+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
64613
64614 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
64615- atomic_read(&fscache_n_attr_changed),
64616- atomic_read(&fscache_n_attr_changed_ok),
64617- atomic_read(&fscache_n_attr_changed_nobufs),
64618- atomic_read(&fscache_n_attr_changed_nomem),
64619- atomic_read(&fscache_n_attr_changed_calls));
64620+ atomic_read_unchecked(&fscache_n_attr_changed),
64621+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
64622+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
64623+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
64624+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
64625
64626 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
64627- atomic_read(&fscache_n_allocs),
64628- atomic_read(&fscache_n_allocs_ok),
64629- atomic_read(&fscache_n_allocs_wait),
64630- atomic_read(&fscache_n_allocs_nobufs),
64631- atomic_read(&fscache_n_allocs_intr));
64632+ atomic_read_unchecked(&fscache_n_allocs),
64633+ atomic_read_unchecked(&fscache_n_allocs_ok),
64634+ atomic_read_unchecked(&fscache_n_allocs_wait),
64635+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
64636+ atomic_read_unchecked(&fscache_n_allocs_intr));
64637 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
64638- atomic_read(&fscache_n_alloc_ops),
64639- atomic_read(&fscache_n_alloc_op_waits),
64640- atomic_read(&fscache_n_allocs_object_dead));
64641+ atomic_read_unchecked(&fscache_n_alloc_ops),
64642+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
64643+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
64644
64645 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
64646 " int=%u oom=%u\n",
64647- atomic_read(&fscache_n_retrievals),
64648- atomic_read(&fscache_n_retrievals_ok),
64649- atomic_read(&fscache_n_retrievals_wait),
64650- atomic_read(&fscache_n_retrievals_nodata),
64651- atomic_read(&fscache_n_retrievals_nobufs),
64652- atomic_read(&fscache_n_retrievals_intr),
64653- atomic_read(&fscache_n_retrievals_nomem));
64654+ atomic_read_unchecked(&fscache_n_retrievals),
64655+ atomic_read_unchecked(&fscache_n_retrievals_ok),
64656+ atomic_read_unchecked(&fscache_n_retrievals_wait),
64657+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
64658+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
64659+ atomic_read_unchecked(&fscache_n_retrievals_intr),
64660+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
64661 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
64662- atomic_read(&fscache_n_retrieval_ops),
64663- atomic_read(&fscache_n_retrieval_op_waits),
64664- atomic_read(&fscache_n_retrievals_object_dead));
64665+ atomic_read_unchecked(&fscache_n_retrieval_ops),
64666+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
64667+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
64668
64669 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
64670- atomic_read(&fscache_n_stores),
64671- atomic_read(&fscache_n_stores_ok),
64672- atomic_read(&fscache_n_stores_again),
64673- atomic_read(&fscache_n_stores_nobufs),
64674- atomic_read(&fscache_n_stores_oom));
64675+ atomic_read_unchecked(&fscache_n_stores),
64676+ atomic_read_unchecked(&fscache_n_stores_ok),
64677+ atomic_read_unchecked(&fscache_n_stores_again),
64678+ atomic_read_unchecked(&fscache_n_stores_nobufs),
64679+ atomic_read_unchecked(&fscache_n_stores_oom));
64680 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
64681- atomic_read(&fscache_n_store_ops),
64682- atomic_read(&fscache_n_store_calls),
64683- atomic_read(&fscache_n_store_pages),
64684- atomic_read(&fscache_n_store_radix_deletes),
64685- atomic_read(&fscache_n_store_pages_over_limit));
64686+ atomic_read_unchecked(&fscache_n_store_ops),
64687+ atomic_read_unchecked(&fscache_n_store_calls),
64688+ atomic_read_unchecked(&fscache_n_store_pages),
64689+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
64690+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
64691
64692 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
64693- atomic_read(&fscache_n_store_vmscan_not_storing),
64694- atomic_read(&fscache_n_store_vmscan_gone),
64695- atomic_read(&fscache_n_store_vmscan_busy),
64696- atomic_read(&fscache_n_store_vmscan_cancelled),
64697- atomic_read(&fscache_n_store_vmscan_wait));
64698+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
64699+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
64700+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
64701+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
64702+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
64703
64704 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
64705- atomic_read(&fscache_n_op_pend),
64706- atomic_read(&fscache_n_op_run),
64707- atomic_read(&fscache_n_op_enqueue),
64708- atomic_read(&fscache_n_op_cancelled),
64709- atomic_read(&fscache_n_op_rejected));
64710+ atomic_read_unchecked(&fscache_n_op_pend),
64711+ atomic_read_unchecked(&fscache_n_op_run),
64712+ atomic_read_unchecked(&fscache_n_op_enqueue),
64713+ atomic_read_unchecked(&fscache_n_op_cancelled),
64714+ atomic_read_unchecked(&fscache_n_op_rejected));
64715 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
64716- atomic_read(&fscache_n_op_deferred_release),
64717- atomic_read(&fscache_n_op_release),
64718- atomic_read(&fscache_n_op_gc));
64719+ atomic_read_unchecked(&fscache_n_op_deferred_release),
64720+ atomic_read_unchecked(&fscache_n_op_release),
64721+ atomic_read_unchecked(&fscache_n_op_gc));
64722
64723 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
64724 atomic_read(&fscache_n_cop_alloc_object),
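
The mass retyping in stats.c shows why the conversion has to be total: every atomic_read() on a converted counter must become atomic_read_unchecked(), while the fscache_n_cop_* counters, which are also decremented via fscache_stat_d(), stay as checked atomic_t. Because the two flavours are distinct struct types, a missed call site fails to compile instead of silently mixing them. A sketch of that property, with model names in place of the real accessors:

typedef struct { int v; } atomic_checked_t;
typedef struct { int v; } atomic_unchecked_model_t;

static int atomic_read_model(atomic_checked_t *a)                   { return a->v; }
static int atomic_read_unchecked_model(atomic_unchecked_model_t *a) { return a->v; }

static atomic_unchecked_model_t fscache_n_marks_model;

int read_marks(void)
{
	/* atomic_read_model(&fscache_n_marks_model) would not compile:
	 * incompatible pointer type, so no conversion can be missed */
	return atomic_read_unchecked_model(&fscache_n_marks_model);
}
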
64725diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
64726index 28d0c7a..04816b7 100644
64727--- a/fs/fuse/cuse.c
64728+++ b/fs/fuse/cuse.c
64729@@ -611,10 +611,12 @@ static int __init cuse_init(void)
64730 INIT_LIST_HEAD(&cuse_conntbl[i]);
64731
64732 /* inherit and extend fuse_dev_operations */
64733- cuse_channel_fops = fuse_dev_operations;
64734- cuse_channel_fops.owner = THIS_MODULE;
64735- cuse_channel_fops.open = cuse_channel_open;
64736- cuse_channel_fops.release = cuse_channel_release;
64737+ pax_open_kernel();
64738+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
64739+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
64740+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
64741+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
64742+ pax_close_kernel();
64743
64744 cuse_class = class_create(THIS_MODULE, "cuse");
64745 if (IS_ERR(cuse_class))
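
Upstream cuse populates cuse_channel_fops by plain assignment; under this patch the constify plugin is assumed to have made file_operations read-only after init, so the one-off fixup goes through memcpy and pointer writes bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A user-space analogue of the same discipline using mprotect(); everything here is illustrative, not the PaX implementation:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct fops_model { int (*open)(void); int (*release)(void); };

static int channel_open(void) { return 0; }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	struct fops_model *tbl = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tbl == MAP_FAILED)
		return 1;
	memset(tbl, 0, sizeof(*tbl));
	mprotect(tbl, pg, PROT_READ);              /* ~ constified .rodata */

	mprotect(tbl, pg, PROT_READ | PROT_WRITE); /* ~ pax_open_kernel()  */
	tbl->open = channel_open;                  /* the one-off fixup    */
	mprotect(tbl, pg, PROT_READ);              /* ~ pax_close_kernel() */

	printf("open() -> %d\n", tbl->open());
	return 0;
}
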
64746diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
64747index 39706c5..a803c71 100644
64748--- a/fs/fuse/dev.c
64749+++ b/fs/fuse/dev.c
64750@@ -1405,7 +1405,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64751 ret = 0;
64752 pipe_lock(pipe);
64753
64754- if (!pipe->readers) {
64755+ if (!atomic_read(&pipe->readers)) {
64756 send_sig(SIGPIPE, current, 0);
64757 if (!ret)
64758 ret = -EPIPE;
64759@@ -1434,7 +1434,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64760 page_nr++;
64761 ret += buf->len;
64762
64763- if (pipe->files)
64764+ if (atomic_read(&pipe->files))
64765 do_wakeup = 1;
64766 }
64767
64768diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
64769index 1545b71..7fabe47 100644
64770--- a/fs/fuse/dir.c
64771+++ b/fs/fuse/dir.c
64772@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
64773 return link;
64774 }
64775
64776-static void free_link(char *link)
64777+static void free_link(const char *link)
64778 {
64779 if (!IS_ERR(link))
64780 free_page((unsigned long) link);
64781diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
64782index f42dffb..4a4c435 100644
64783--- a/fs/gfs2/glock.c
64784+++ b/fs/gfs2/glock.c
64785@@ -385,9 +385,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
64786 if (held1 != held2) {
64787 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
64788 if (held2)
64789- gl->gl_lockref.count++;
64790+ __lockref_inc(&gl->gl_lockref);
64791 else
64792- gl->gl_lockref.count--;
64793+ __lockref_dec(&gl->gl_lockref);
64794 }
64795 if (held1 && held2 && list_empty(&gl->gl_holders))
64796 clear_bit(GLF_QUEUED, &gl->gl_flags);
64797@@ -614,9 +614,9 @@ out:
64798 out_sched:
64799 clear_bit(GLF_LOCK, &gl->gl_flags);
64800 smp_mb__after_atomic();
64801- gl->gl_lockref.count++;
64802+ __lockref_inc(&gl->gl_lockref);
64803 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
64804- gl->gl_lockref.count--;
64805+ __lockref_dec(&gl->gl_lockref);
64806 return;
64807
64808 out_unlock:
64809@@ -742,7 +742,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
64810 gl->gl_sbd = sdp;
64811 gl->gl_flags = 0;
64812 gl->gl_name = name;
64813- gl->gl_lockref.count = 1;
64814+ __lockref_set(&gl->gl_lockref, 1);
64815 gl->gl_state = LM_ST_UNLOCKED;
64816 gl->gl_target = LM_ST_UNLOCKED;
64817 gl->gl_demote_state = LM_ST_EXCLUSIVE;
64818@@ -1020,9 +1020,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
64819 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
64820 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
64821 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
64822- gl->gl_lockref.count++;
64823+ __lockref_inc(&gl->gl_lockref);
64824 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
64825- gl->gl_lockref.count--;
64826+ __lockref_dec(&gl->gl_lockref);
64827 }
64828 run_queue(gl, 1);
64829 spin_unlock(&gl->gl_spin);
64830@@ -1325,7 +1325,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
64831 }
64832 }
64833
64834- gl->gl_lockref.count++;
64835+ __lockref_inc(&gl->gl_lockref);
64836 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
64837 spin_unlock(&gl->gl_spin);
64838
64839@@ -1384,12 +1384,12 @@ add_back_to_lru:
64840 goto add_back_to_lru;
64841 }
64842 clear_bit(GLF_LRU, &gl->gl_flags);
64843- gl->gl_lockref.count++;
64844+ __lockref_inc(&gl->gl_lockref);
64845 if (demote_ok(gl))
64846 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
64847 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
64848 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
64849- gl->gl_lockref.count--;
64850+ __lockref_dec(&gl->gl_lockref);
64851 spin_unlock(&gl->gl_spin);
64852 cond_resched_lock(&lru_lock);
64853 }
64854@@ -1719,7 +1719,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
64855 state2str(gl->gl_demote_state), dtime,
64856 atomic_read(&gl->gl_ail_count),
64857 atomic_read(&gl->gl_revokes),
64858- (int)gl->gl_lockref.count, gl->gl_hold_time);
64859+ __lockref_read(&gl->gl_lockref), gl->gl_hold_time);
64860
64861 list_for_each_entry(gh, &gl->gl_holders, gh_list)
64862 dump_holder(seq, gh);
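
gfs2 stops touching gl_lockref.count with open-coded ++/--; the __lockref_inc/__lockref_dec/__lockref_read/__lockref_set helpers are grsecurity additions whose definitions are not shown in this hunk, but the visible intent is to funnel the embedded count through accessors that the instrumentation can check. A minimal model of that accessor discipline, with asserts standing in for whatever checks the real helpers carry:

#include <assert.h>
#include <limits.h>

struct lockref_model { int count; /* spinlock elided in this sketch */ };

static int  __lockref_read_model(struct lockref_model *l)        { return l->count; }
static void __lockref_set_model (struct lockref_model *l, int v) { l->count = v; }

static void __lockref_inc_model(struct lockref_model *l)
{
	assert(l->count < INT_MAX);	/* hook point: overflow is a bug */
	l->count++;
}

static void __lockref_dec_model(struct lockref_model *l)
{
	assert(l->count > 0);		/* a drop must never go negative */
	l->count--;
}
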
64863diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
64864index fe91951..ce38a6e 100644
64865--- a/fs/gfs2/glops.c
64866+++ b/fs/gfs2/glops.c
64867@@ -544,9 +544,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
64868
64869 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
64870 gl->gl_state == LM_ST_SHARED && ip) {
64871- gl->gl_lockref.count++;
64872+ __lockref_inc(&gl->gl_lockref);
64873 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
64874- gl->gl_lockref.count--;
64875+ __lockref_dec(&gl->gl_lockref);
64876 }
64877 }
64878
64879diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
64880index 3aa17d4..b338075 100644
64881--- a/fs/gfs2/quota.c
64882+++ b/fs/gfs2/quota.c
64883@@ -154,7 +154,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
64884 if (!spin_trylock(&qd->qd_lockref.lock))
64885 return LRU_SKIP;
64886
64887- if (qd->qd_lockref.count == 0) {
64888+ if (__lockref_read(&qd->qd_lockref) == 0) {
64889 lockref_mark_dead(&qd->qd_lockref);
64890 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
64891 }
64892@@ -221,7 +221,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
64893 return NULL;
64894
64895 qd->qd_sbd = sdp;
64896- qd->qd_lockref.count = 1;
64897+ __lockref_set(&qd->qd_lockref, 1);
64898 spin_lock_init(&qd->qd_lockref.lock);
64899 qd->qd_id = qid;
64900 qd->qd_slot = -1;
64901@@ -312,7 +312,7 @@ static void qd_put(struct gfs2_quota_data *qd)
64902 if (lockref_put_or_lock(&qd->qd_lockref))
64903 return;
64904
64905- qd->qd_lockref.count = 0;
64906+ __lockref_set(&qd->qd_lockref, 0);
64907 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
64908 spin_unlock(&qd->qd_lockref.lock);
64909
64910diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
64911index fd62cae..3494dfa 100644
64912--- a/fs/hostfs/hostfs_kern.c
64913+++ b/fs/hostfs/hostfs_kern.c
64914@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64915
64916 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
64917 {
64918- char *s = nd_get_link(nd);
64919+ const char *s = nd_get_link(nd);
64920 if (!IS_ERR(s))
64921 __putname(s);
64922 }
64923diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
64924index c274aca..772fa5e 100644
64925--- a/fs/hugetlbfs/inode.c
64926+++ b/fs/hugetlbfs/inode.c
64927@@ -148,6 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64928 struct mm_struct *mm = current->mm;
64929 struct vm_area_struct *vma;
64930 struct hstate *h = hstate_file(file);
64931+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
64932 struct vm_unmapped_area_info info;
64933
64934 if (len & ~huge_page_mask(h))
64935@@ -161,17 +162,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64936 return addr;
64937 }
64938
64939+#ifdef CONFIG_PAX_RANDMMAP
64940+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64941+#endif
64942+
64943 if (addr) {
64944 addr = ALIGN(addr, huge_page_size(h));
64945 vma = find_vma(mm, addr);
64946- if (TASK_SIZE - len >= addr &&
64947- (!vma || addr + len <= vma->vm_start))
64948+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
64949 return addr;
64950 }
64951
64952 info.flags = 0;
64953 info.length = len;
64954 info.low_limit = TASK_UNMAPPED_BASE;
64955+
64956+#ifdef CONFIG_PAX_RANDMMAP
64957+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64958+ info.low_limit += mm->delta_mmap;
64959+#endif
64960+
64961 info.high_limit = TASK_SIZE;
64962 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
64963 info.align_offset = 0;
64964@@ -912,7 +922,7 @@ static struct file_system_type hugetlbfs_fs_type = {
64965 };
64966 MODULE_ALIAS_FS("hugetlbfs");
64967
64968-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64969+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64970
64971 static int can_do_hugetlb_shm(void)
64972 {
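
In hugetlb_get_unmapped_area the patch does two things: under PAX_RANDMMAP it ignores caller address hints and shifts the search floor by the per-mm random delta_mmap, and it replaces the bare "addr + len <= vma->vm_start" test with check_heap_stack_gap(), which also accounts for a randomized guard gap (gr_rand_threadstack_offset). The exact semantics of those helpers are not shown here; a sketch of the gap test under that assumption:

#include <stdbool.h>
#include <stdio.h>

/* a hinted address is honoured only if [addr, addr+len+gap) stays clear
 * of the next mapping; gap models the randomized thread-stack offset */
static bool gap_ok(unsigned long vma_start, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	if (vma_start == 0)		/* no next mapping at all */
		return true;
	return addr + len + gap <= vma_start;
}

int main(void)
{
	printf("%d\n", gap_ok(0x700000, 0x600000, 0x100000, 0));      /* 1 */
	printf("%d\n", gap_ok(0x700000, 0x600000, 0x100000, 0x1000)); /* 0 */
	return 0;
}
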
64973diff --git a/fs/inode.c b/fs/inode.c
64974index f00b16f..b653fea 100644
64975--- a/fs/inode.c
64976+++ b/fs/inode.c
64977@@ -830,16 +830,20 @@ unsigned int get_next_ino(void)
64978 unsigned int *p = &get_cpu_var(last_ino);
64979 unsigned int res = *p;
64980
64981+start:
64982+
64983 #ifdef CONFIG_SMP
64984 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
64985- static atomic_t shared_last_ino;
64986- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
64987+ static atomic_unchecked_t shared_last_ino;
64988+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
64989
64990 res = next - LAST_INO_BATCH;
64991 }
64992 #endif
64993
64994- *p = ++res;
64995+ if (unlikely(!++res))
64996+ goto start; /* never zero */
64997+ *p = res;
64998 put_cpu_var(last_ino);
64999 return res;
65000 }
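
Two things change in get_next_ino(): the shared batch counter becomes unchecked, since it is expected to wrap over a long uptime, and the per-cpu counter now skips zero on wraparound, because ino 0 conventionally means "no inode". A compilable user-space mirror of the new control flow, with _Thread_local standing in for per-cpu data:

#include <stdatomic.h>
#include <stdio.h>

#define LAST_INO_BATCH 1024u

static atomic_uint shared_last_ino;	/* wraparound here is expected */

unsigned int get_next_ino_model(void)
{
	static _Thread_local unsigned int last_ino;	/* ~ per-cpu var */
	unsigned int res = last_ino;

start:
	if ((res & (LAST_INO_BATCH - 1)) == 0)		/* batch exhausted */
		res = atomic_fetch_add(&shared_last_ino, LAST_INO_BATCH);

	if (++res == 0)		/* wrapped: never hand out inode number 0 */
		goto start;

	last_ino = res;
	return res;
}

int main(void)
{
	printf("%u %u\n", get_next_ino_model(), get_next_ino_model());
	return 0;
}
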
65001diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
65002index 4a6cf28..d3a29d3 100644
65003--- a/fs/jffs2/erase.c
65004+++ b/fs/jffs2/erase.c
65005@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
65006 struct jffs2_unknown_node marker = {
65007 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
65008 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65009- .totlen = cpu_to_je32(c->cleanmarker_size)
65010+ .totlen = cpu_to_je32(c->cleanmarker_size),
65011+ .hdr_crc = cpu_to_je32(0)
65012 };
65013
65014 jffs2_prealloc_raw_node_refs(c, jeb, 1);
65015diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
65016index 09ed551..45684f8 100644
65017--- a/fs/jffs2/wbuf.c
65018+++ b/fs/jffs2/wbuf.c
65019@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
65020 {
65021 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
65022 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
65023- .totlen = constant_cpu_to_je32(8)
65024+ .totlen = constant_cpu_to_je32(8),
65025+ .hdr_crc = constant_cpu_to_je32(0)
65026 };
65027
65028 /*
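
Both jffs2 hunks add an explicit .hdr_crc = 0 to the cleanmarker initializers. In C, a designated initializer already zeroes the members it leaves out, so the change makes explicit a value the language guarantees anyway; whether a build plugin also wants it spelled out is an inference. The language rule in miniature:

#include <assert.h>

struct jffs2_unknown_node_model {
	unsigned short magic, nodetype;
	unsigned int   totlen, hdr_crc;
};

int main(void)
{
	struct jffs2_unknown_node_model marker = {
		.magic    = 0x1985,
		.nodetype = 0x2003,
		.totlen   = 8,
		/* .hdr_crc deliberately omitted */
	};
	assert(marker.hdr_crc == 0);	/* C11 6.7.9: unnamed members get 0 */
	return 0;
}
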
65029diff --git a/fs/jfs/super.c b/fs/jfs/super.c
65030index 5d30c56..8c45372 100644
65031--- a/fs/jfs/super.c
65032+++ b/fs/jfs/super.c
65033@@ -901,7 +901,7 @@ static int __init init_jfs_fs(void)
65034
65035 jfs_inode_cachep =
65036 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
65037- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
65038+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
65039 init_once);
65040 if (jfs_inode_cachep == NULL)
65041 return -ENOMEM;
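
SLAB_USERCOPY whitelists the jfs_ip cache for PAX_USERCOPY, which restricts copy_to_user()/copy_from_user() on slab memory to whitelisted caches and to the bounds of a single object. A rough user-space model of the check that flag feeds; the real implementation differs and the names below are invented for illustration:

#include <stdio.h>

struct slab_model { unsigned long objsize; int usercopy_ok; };

static const char *check_usercopy(const struct slab_model *s,
				  unsigned long offset, unsigned long len)
{
	if (!s->usercopy_ok)
		return "cache not whitelisted for usercopy";
	if (offset > s->objsize || len > s->objsize - offset)
		return "copy would overrun the slab object";
	return NULL;			/* allowed */
}

int main(void)
{
	struct slab_model jfs_ip_model = { .objsize = 1024, .usercopy_ok = 1 };
	const char *err;

	err = check_usercopy(&jfs_ip_model, 0, 1024);
	printf("%s\n", err ? err : "1024-byte copy ok");
	err = check_usercopy(&jfs_ip_model, 512, 1024);
	printf("%s\n", err ? err : "unexpected");
	return 0;
}
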
65042diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
65043index 6acc964..eca491f 100644
65044--- a/fs/kernfs/dir.c
65045+++ b/fs/kernfs/dir.c
65046@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
65047 *
65048 * Returns 31 bit hash of ns + name (so it fits in an off_t )
65049 */
65050-static unsigned int kernfs_name_hash(const char *name, const void *ns)
65051+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
65052 {
65053 unsigned long hash = init_name_hash();
65054 unsigned int len = strlen(name);
65055@@ -831,6 +831,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
65056 ret = scops->mkdir(parent, dentry->d_name.name, mode);
65057
65058 kernfs_put_active(parent);
65059+
65060+ if (!ret) {
65061+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
65062+ ret = PTR_ERR_OR_ZERO(dentry_ret);
65063+ }
65064+
65065 return ret;
65066 }
65067
65068diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
65069index 2bacb99..f745182 100644
65070--- a/fs/kernfs/file.c
65071+++ b/fs/kernfs/file.c
65072@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
65073
65074 struct kernfs_open_node {
65075 atomic_t refcnt;
65076- atomic_t event;
65077+ atomic_unchecked_t event;
65078 wait_queue_head_t poll;
65079 struct list_head files; /* goes through kernfs_open_file.list */
65080 };
65081@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
65082 {
65083 struct kernfs_open_file *of = sf->private;
65084
65085- of->event = atomic_read(&of->kn->attr.open->event);
65086+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
65087
65088 return of->kn->attr.ops->seq_show(sf, v);
65089 }
65090@@ -207,7 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
65091 goto out_free;
65092 }
65093
65094- of->event = atomic_read(&of->kn->attr.open->event);
65095+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
65096 ops = kernfs_ops(of->kn);
65097 if (ops->read)
65098 len = ops->read(of, buf, len, *ppos);
65099@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
65100 {
65101 struct kernfs_open_file *of = kernfs_of(file);
65102 const struct kernfs_ops *ops;
65103- size_t len;
65104+ ssize_t len;
65105 char *buf;
65106
65107 if (of->atomic_write_len) {
65108@@ -385,12 +385,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
65109 return ret;
65110 }
65111
65112-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65113- void *buf, int len, int write)
65114+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
65115+ void *buf, size_t len, int write)
65116 {
65117 struct file *file = vma->vm_file;
65118 struct kernfs_open_file *of = kernfs_of(file);
65119- int ret;
65120+ ssize_t ret;
65121
65122 if (!of->vm_ops)
65123 return -EINVAL;
65124@@ -569,7 +569,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
65125 return -ENOMEM;
65126
65127 atomic_set(&new_on->refcnt, 0);
65128- atomic_set(&new_on->event, 1);
65129+ atomic_set_unchecked(&new_on->event, 1);
65130 init_waitqueue_head(&new_on->poll);
65131 INIT_LIST_HEAD(&new_on->files);
65132 goto retry;
65133@@ -793,7 +793,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
65134
65135 kernfs_put_active(kn);
65136
65137- if (of->event != atomic_read(&on->event))
65138+ if (of->event != atomic_read_unchecked(&on->event))
65139 goto trigger;
65140
65141 return DEFAULT_POLLMASK;
65142@@ -824,7 +824,7 @@ repeat:
65143
65144 on = kn->attr.open;
65145 if (on) {
65146- atomic_inc(&on->event);
65147+ atomic_inc_unchecked(&on->event);
65148 wake_up_interruptible(&on->poll);
65149 }
65150
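
Aside from the _unchecked conversions, kernfs_fop_write's local len is widened from size_t to ssize_t: the kernfs_ops write hook can return a negative errno, and storing that in an unsigned type makes any later error test never fire. The trap in miniature:

#include <stdio.h>
#include <sys/types.h>

static ssize_t fake_write(void) { return -22; /* -EINVAL */ }

int main(void)
{
	size_t  ulen = fake_write();	/* bug: wraps to a huge positive value */
	ssize_t slen = fake_write();	/* fix: the sign survives */

	printf("as size_t : %zu (error lost)\n", ulen);
	printf("as ssize_t: %zd (error intact)\n", slen);
	return 0;
}
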
65151diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
65152index 8a19889..4c3069a 100644
65153--- a/fs/kernfs/symlink.c
65154+++ b/fs/kernfs/symlink.c
65155@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
65156 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
65157 void *cookie)
65158 {
65159- char *page = nd_get_link(nd);
65160+ const char *page = nd_get_link(nd);
65161 if (!IS_ERR(page))
65162 free_page((unsigned long)page);
65163 }
65164diff --git a/fs/libfs.c b/fs/libfs.c
65165index 0ab6512..cd9982d 100644
65166--- a/fs/libfs.c
65167+++ b/fs/libfs.c
65168@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65169
65170 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
65171 struct dentry *next = list_entry(p, struct dentry, d_child);
65172+ char d_name[sizeof(next->d_iname)];
65173+ const unsigned char *name;
65174+
65175 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
65176 if (!simple_positive(next)) {
65177 spin_unlock(&next->d_lock);
65178@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
65179
65180 spin_unlock(&next->d_lock);
65181 spin_unlock(&dentry->d_lock);
65182- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
65183+ name = next->d_name.name;
65184+ if (name == next->d_iname) {
65185+ memcpy(d_name, name, next->d_name.len);
65186+ name = d_name;
65187+ }
65188+ if (!dir_emit(ctx, name, next->d_name.len,
65189 next->d_inode->i_ino, dt_type(next->d_inode)))
65190 return 0;
65191 spin_lock(&dentry->d_lock);
65192@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
65193 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
65194 void *cookie)
65195 {
65196- char *s = nd_get_link(nd);
65197+ const char *s = nd_get_link(nd);
65198 if (!IS_ERR(s))
65199 kfree(s);
65200 }
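
The dcache_readdir change copies the name out of the dentry before d_lock is dropped: short names live inline in d_iname inside the dentry itself, and a concurrent rename may rewrite that buffer while dir_emit() runs, so the stack copy pins a stable snapshot. The shape of the fix, as a standalone sketch:

#include <string.h>

struct dentry_model {
	const char	*name;		/* heap name, or points at iname */
	unsigned int	len;
	char		iname[32];	/* inline storage for short names */
};

/* call with the per-dentry lock held; buf supplies storage that stays
 * valid after the lock is dropped */
static const char *stable_name(const struct dentry_model *d, char buf[32])
{
	const char *name = d->name;

	if (name == d->iname) {		/* inline: a rename may rewrite it */
		memcpy(buf, name, d->len);	/* len < 32 for inline names */
		name = buf;
	}
	return name;			/* safe to use after unlocking */
}
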
65201diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
65202index acd3947..1f896e2 100644
65203--- a/fs/lockd/clntproc.c
65204+++ b/fs/lockd/clntproc.c
65205@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
65206 /*
65207 * Cookie counter for NLM requests
65208 */
65209-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
65210+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
65211
65212 void nlmclnt_next_cookie(struct nlm_cookie *c)
65213 {
65214- u32 cookie = atomic_inc_return(&nlm_cookie);
65215+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
65216
65217 memcpy(c->data, &cookie, 4);
65218 c->len=4;
65219diff --git a/fs/mount.h b/fs/mount.h
65220index 6a61c2b..bd79179 100644
65221--- a/fs/mount.h
65222+++ b/fs/mount.h
65223@@ -13,7 +13,7 @@ struct mnt_namespace {
65224 u64 seq; /* Sequence number to prevent loops */
65225 wait_queue_head_t poll;
65226 u64 event;
65227-};
65228+} __randomize_layout;
65229
65230 struct mnt_pcp {
65231 int mnt_count;
65232@@ -65,7 +65,7 @@ struct mount {
65233 struct hlist_head mnt_pins;
65234 struct fs_pin mnt_umount;
65235 struct dentry *mnt_ex_mountpoint;
65236-};
65237+} __randomize_layout;
65238
65239 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
65240
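
Tagging mnt_namespace and mount with __randomize_layout hands them to the RANDSTRUCT plugin, which shuffles member order per build, so an exploit that hardcodes member offsets misses. What such an exploit relies on, in plain C; the struct and offsets here are purely illustrative:

#include <stddef.h>
#include <stdio.h>

struct cred_model { int uid, gid, suid, sgid; };

int main(void)
{
	/* kernel-write exploits often bake in offsets like these; with a
	 * randomized layout they become a per-build secret */
	printf("uid at %zu, sgid at %zu\n",
	       offsetof(struct cred_model, uid),
	       offsetof(struct cred_model, sgid));
	return 0;
}
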
65241diff --git a/fs/namei.c b/fs/namei.c
65242index 50a8583..44c470a 100644
65243--- a/fs/namei.c
65244+++ b/fs/namei.c
65245@@ -337,17 +337,32 @@ int generic_permission(struct inode *inode, int mask)
65246 if (ret != -EACCES)
65247 return ret;
65248
65249+#ifdef CONFIG_GRKERNSEC
65250+ /* we'll block if we have to log due to a denied capability use */
65251+ if (mask & MAY_NOT_BLOCK)
65252+ return -ECHILD;
65253+#endif
65254+
65255 if (S_ISDIR(inode->i_mode)) {
65256 /* DACs are overridable for directories */
65257- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65258- return 0;
65259 if (!(mask & MAY_WRITE))
65260- if (capable_wrt_inode_uidgid(inode,
65261- CAP_DAC_READ_SEARCH))
65262+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65263+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65264 return 0;
65265+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65266+ return 0;
65267 return -EACCES;
65268 }
65269 /*
65270+ * Searching includes executable on directories, else just read.
65271+ */
65272+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65273+ if (mask == MAY_READ)
65274+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
65275+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65276+ return 0;
65277+
65278+ /*
65279 * Read/write DACs are always overridable.
65280 * Executable DACs are overridable when there is
65281 * at least one exec bit set.
65282@@ -356,14 +371,6 @@ int generic_permission(struct inode *inode, int mask)
65283 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
65284 return 0;
65285
65286- /*
65287- * Searching includes executable on directories, else just read.
65288- */
65289- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
65290- if (mask == MAY_READ)
65291- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
65292- return 0;
65293-
65294 return -EACCES;
65295 }
65296 EXPORT_SYMBOL(generic_permission);
65297@@ -503,7 +510,7 @@ struct nameidata {
65298 int last_type;
65299 unsigned depth;
65300 struct file *base;
65301- char *saved_names[MAX_NESTED_LINKS + 1];
65302+ const char *saved_names[MAX_NESTED_LINKS + 1];
65303 };
65304
65305 /*
65306@@ -714,13 +721,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
65307 nd->flags |= LOOKUP_JUMPED;
65308 }
65309
65310-void nd_set_link(struct nameidata *nd, char *path)
65311+void nd_set_link(struct nameidata *nd, const char *path)
65312 {
65313 nd->saved_names[nd->depth] = path;
65314 }
65315 EXPORT_SYMBOL(nd_set_link);
65316
65317-char *nd_get_link(struct nameidata *nd)
65318+const char *nd_get_link(const struct nameidata *nd)
65319 {
65320 return nd->saved_names[nd->depth];
65321 }
65322@@ -855,7 +862,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65323 {
65324 struct dentry *dentry = link->dentry;
65325 int error;
65326- char *s;
65327+ const char *s;
65328
65329 BUG_ON(nd->flags & LOOKUP_RCU);
65330
65331@@ -876,6 +883,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
65332 if (error)
65333 goto out_put_nd_path;
65334
65335+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
65336+ dentry->d_inode, dentry, nd->path.mnt)) {
65337+ error = -EACCES;
65338+ goto out_put_nd_path;
65339+ }
65340+
65341 nd->last_type = LAST_BIND;
65342 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
65343 error = PTR_ERR(*p);
65344@@ -1640,6 +1653,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
65345 if (res)
65346 break;
65347 res = walk_component(nd, path, LOOKUP_FOLLOW);
65348+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
65349+ res = -EACCES;
65350 put_link(nd, &link, cookie);
65351 } while (res > 0);
65352
65353@@ -1712,7 +1727,7 @@ EXPORT_SYMBOL(full_name_hash);
65354 static inline u64 hash_name(const char *name)
65355 {
65356 unsigned long a, b, adata, bdata, mask, hash, len;
65357- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65358+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
65359
65360 hash = a = 0;
65361 len = -sizeof(unsigned long);
65362@@ -2007,6 +2022,8 @@ static int path_lookupat(int dfd, const char *name,
65363 if (err)
65364 break;
65365 err = lookup_last(nd, &path);
65366+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
65367+ err = -EACCES;
65368 put_link(nd, &link, cookie);
65369 }
65370 }
65371@@ -2014,6 +2031,13 @@ static int path_lookupat(int dfd, const char *name,
65372 if (!err)
65373 err = complete_walk(nd);
65374
65375+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
65376+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65377+ path_put(&nd->path);
65378+ err = -ENOENT;
65379+ }
65380+ }
65381+
65382 if (!err && nd->flags & LOOKUP_DIRECTORY) {
65383 if (!d_can_lookup(nd->path.dentry)) {
65384 path_put(&nd->path);
65385@@ -2035,8 +2059,15 @@ static int filename_lookup(int dfd, struct filename *name,
65386 retval = path_lookupat(dfd, name->name,
65387 flags | LOOKUP_REVAL, nd);
65388
65389- if (likely(!retval))
65390+ if (likely(!retval)) {
65391 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
65392+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
65393+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
65394+ path_put(&nd->path);
65395+ return -ENOENT;
65396+ }
65397+ }
65398+ }
65399 return retval;
65400 }
65401
65402@@ -2615,6 +2646,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
65403 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
65404 return -EPERM;
65405
65406+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
65407+ return -EPERM;
65408+ if (gr_handle_rawio(inode))
65409+ return -EPERM;
65410+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
65411+ return -EACCES;
65412+
65413 return 0;
65414 }
65415
65416@@ -2846,7 +2884,7 @@ looked_up:
65417 * cleared otherwise prior to returning.
65418 */
65419 static int lookup_open(struct nameidata *nd, struct path *path,
65420- struct file *file,
65421+ struct path *link, struct file *file,
65422 const struct open_flags *op,
65423 bool got_write, int *opened)
65424 {
65425@@ -2881,6 +2919,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65426 /* Negative dentry, just create the file */
65427 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
65428 umode_t mode = op->mode;
65429+
65430+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
65431+ error = -EACCES;
65432+ goto out_dput;
65433+ }
65434+
65435+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
65436+ error = -EACCES;
65437+ goto out_dput;
65438+ }
65439+
65440 if (!IS_POSIXACL(dir->d_inode))
65441 mode &= ~current_umask();
65442 /*
65443@@ -2902,6 +2951,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65444 nd->flags & LOOKUP_EXCL);
65445 if (error)
65446 goto out_dput;
65447+ else
65448+ gr_handle_create(dentry, nd->path.mnt);
65449 }
65450 out_no_open:
65451 path->dentry = dentry;
65452@@ -2916,7 +2967,7 @@ out_dput:
65453 /*
65454 * Handle the last step of open()
65455 */
65456-static int do_last(struct nameidata *nd, struct path *path,
65457+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
65458 struct file *file, const struct open_flags *op,
65459 int *opened, struct filename *name)
65460 {
65461@@ -2966,6 +3017,15 @@ static int do_last(struct nameidata *nd, struct path *path,
65462 if (error)
65463 return error;
65464
65465+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
65466+ error = -ENOENT;
65467+ goto out;
65468+ }
65469+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65470+ error = -EACCES;
65471+ goto out;
65472+ }
65473+
65474 audit_inode(name, dir, LOOKUP_PARENT);
65475 error = -EISDIR;
65476 /* trailing slashes? */
65477@@ -2985,7 +3045,7 @@ retry_lookup:
65478 */
65479 }
65480 mutex_lock(&dir->d_inode->i_mutex);
65481- error = lookup_open(nd, path, file, op, got_write, opened);
65482+ error = lookup_open(nd, path, link, file, op, got_write, opened);
65483 mutex_unlock(&dir->d_inode->i_mutex);
65484
65485 if (error <= 0) {
65486@@ -3009,11 +3069,28 @@ retry_lookup:
65487 goto finish_open_created;
65488 }
65489
65490+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
65491+ error = -ENOENT;
65492+ goto exit_dput;
65493+ }
65494+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
65495+ error = -EACCES;
65496+ goto exit_dput;
65497+ }
65498+
65499 /*
65500 * create/update audit record if it already exists.
65501 */
65502- if (d_is_positive(path->dentry))
65503+ if (d_is_positive(path->dentry)) {
65504+ /* only check if O_CREAT is specified, all other checks need to go
65505+ into may_open */
65506+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
65507+ error = -EACCES;
65508+ goto exit_dput;
65509+ }
65510+
65511 audit_inode(name, path->dentry, 0);
65512+ }
65513
65514 /*
65515 * If atomic_open() acquired write access it is dropped now due to
65516@@ -3055,6 +3132,11 @@ finish_lookup:
65517 }
65518 }
65519 BUG_ON(inode != path->dentry->d_inode);
65520+ /* if we're resolving a symlink to another symlink */
65521+ if (link && gr_handle_symlink_owner(link, inode)) {
65522+ error = -EACCES;
65523+ goto out;
65524+ }
65525 return 1;
65526 }
65527
65528@@ -3074,7 +3156,18 @@ finish_open:
65529 path_put(&save_parent);
65530 return error;
65531 }
65532+
65533+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65534+ error = -ENOENT;
65535+ goto out;
65536+ }
65537+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65538+ error = -EACCES;
65539+ goto out;
65540+ }
65541+
65542 audit_inode(name, nd->path.dentry, 0);
65543+
65544 error = -EISDIR;
65545 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
65546 goto out;
65547@@ -3235,7 +3328,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65548 if (unlikely(error))
65549 goto out;
65550
65551- error = do_last(nd, &path, file, op, &opened, pathname);
65552+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
65553 while (unlikely(error > 0)) { /* trailing symlink */
65554 struct path link = path;
65555 void *cookie;
65556@@ -3253,7 +3346,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65557 error = follow_link(&link, nd, &cookie);
65558 if (unlikely(error))
65559 break;
65560- error = do_last(nd, &path, file, op, &opened, pathname);
65561+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
65562 put_link(nd, &link, cookie);
65563 }
65564 out:
65565@@ -3356,9 +3449,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
65566 goto unlock;
65567
65568 error = -EEXIST;
65569- if (d_is_positive(dentry))
65570+ if (d_is_positive(dentry)) {
65571+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
65572+ error = -ENOENT;
65573 goto fail;
65574-
65575+ }
65576 /*
65577 * Special case - lookup gave negative, but... we had foo/bar/
65578 * From the vfs_mknod() POV we just have a negative dentry -
65579@@ -3423,6 +3518,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
65580 }
65581 EXPORT_SYMBOL(user_path_create);
65582
65583+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
65584+{
65585+ struct filename *tmp = getname(pathname);
65586+ struct dentry *res;
65587+ if (IS_ERR(tmp))
65588+ return ERR_CAST(tmp);
65589+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
65590+ if (IS_ERR(res))
65591+ putname(tmp);
65592+ else
65593+ *to = tmp;
65594+ return res;
65595+}
65596+
65597 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
65598 {
65599 int error = may_create(dir, dentry);
65600@@ -3486,6 +3595,17 @@ retry:
65601
65602 if (!IS_POSIXACL(path.dentry->d_inode))
65603 mode &= ~current_umask();
65604+
65605+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
65606+ error = -EPERM;
65607+ goto out;
65608+ }
65609+
65610+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
65611+ error = -EACCES;
65612+ goto out;
65613+ }
65614+
65615 error = security_path_mknod(&path, dentry, mode, dev);
65616 if (error)
65617 goto out;
65618@@ -3501,6 +3621,8 @@ retry:
65619 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
65620 break;
65621 }
65622+ if (!error)
65623+ gr_handle_create(dentry, path.mnt);
65624 out:
65625 done_path_create(&path, dentry);
65626 if (retry_estale(error, lookup_flags)) {
65627@@ -3555,9 +3677,16 @@ retry:
65628
65629 if (!IS_POSIXACL(path.dentry->d_inode))
65630 mode &= ~current_umask();
65631+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
65632+ error = -EACCES;
65633+ goto out;
65634+ }
65635 error = security_path_mkdir(&path, dentry, mode);
65636 if (!error)
65637 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
65638+ if (!error)
65639+ gr_handle_create(dentry, path.mnt);
65640+out:
65641 done_path_create(&path, dentry);
65642 if (retry_estale(error, lookup_flags)) {
65643 lookup_flags |= LOOKUP_REVAL;
65644@@ -3590,7 +3719,7 @@ void dentry_unhash(struct dentry *dentry)
65645 {
65646 shrink_dcache_parent(dentry);
65647 spin_lock(&dentry->d_lock);
65648- if (dentry->d_lockref.count == 1)
65649+ if (__lockref_read(&dentry->d_lockref) == 1)
65650 __d_drop(dentry);
65651 spin_unlock(&dentry->d_lock);
65652 }
65653@@ -3641,6 +3770,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
65654 struct filename *name;
65655 struct dentry *dentry;
65656 struct nameidata nd;
65657+ u64 saved_ino = 0;
65658+ dev_t saved_dev = 0;
65659 unsigned int lookup_flags = 0;
65660 retry:
65661 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65662@@ -3673,10 +3804,21 @@ retry:
65663 error = -ENOENT;
65664 goto exit3;
65665 }
65666+
65667+ saved_ino = gr_get_ino_from_dentry(dentry);
65668+ saved_dev = gr_get_dev_from_dentry(dentry);
65669+
65670+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
65671+ error = -EACCES;
65672+ goto exit3;
65673+ }
65674+
65675 error = security_path_rmdir(&nd.path, dentry);
65676 if (error)
65677 goto exit3;
65678 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
65679+ if (!error && (saved_dev || saved_ino))
65680+ gr_handle_delete(saved_ino, saved_dev);
65681 exit3:
65682 dput(dentry);
65683 exit2:
65684@@ -3769,6 +3911,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
65685 struct nameidata nd;
65686 struct inode *inode = NULL;
65687 struct inode *delegated_inode = NULL;
65688+ u64 saved_ino = 0;
65689+ dev_t saved_dev = 0;
65690 unsigned int lookup_flags = 0;
65691 retry:
65692 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65693@@ -3795,10 +3939,22 @@ retry_deleg:
65694 if (d_is_negative(dentry))
65695 goto slashes;
65696 ihold(inode);
65697+
65698+ if (inode->i_nlink <= 1) {
65699+ saved_ino = gr_get_ino_from_dentry(dentry);
65700+ saved_dev = gr_get_dev_from_dentry(dentry);
65701+ }
65702+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
65703+ error = -EACCES;
65704+ goto exit2;
65705+ }
65706+
65707 error = security_path_unlink(&nd.path, dentry);
65708 if (error)
65709 goto exit2;
65710 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
65711+ if (!error && (saved_ino || saved_dev))
65712+ gr_handle_delete(saved_ino, saved_dev);
65713 exit2:
65714 dput(dentry);
65715 }
65716@@ -3887,9 +4043,17 @@ retry:
65717 if (IS_ERR(dentry))
65718 goto out_putname;
65719
65720+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
65721+ error = -EACCES;
65722+ goto out;
65723+ }
65724+
65725 error = security_path_symlink(&path, dentry, from->name);
65726 if (!error)
65727 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
65728+ if (!error)
65729+ gr_handle_create(dentry, path.mnt);
65730+out:
65731 done_path_create(&path, dentry);
65732 if (retry_estale(error, lookup_flags)) {
65733 lookup_flags |= LOOKUP_REVAL;
65734@@ -3993,6 +4157,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
65735 struct dentry *new_dentry;
65736 struct path old_path, new_path;
65737 struct inode *delegated_inode = NULL;
65738+ struct filename *to = NULL;
65739 int how = 0;
65740 int error;
65741
65742@@ -4016,7 +4181,7 @@ retry:
65743 if (error)
65744 return error;
65745
65746- new_dentry = user_path_create(newdfd, newname, &new_path,
65747+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
65748 (how & LOOKUP_REVAL));
65749 error = PTR_ERR(new_dentry);
65750 if (IS_ERR(new_dentry))
65751@@ -4028,11 +4193,28 @@ retry:
65752 error = may_linkat(&old_path);
65753 if (unlikely(error))
65754 goto out_dput;
65755+
65756+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
65757+ old_path.dentry->d_inode,
65758+ old_path.dentry->d_inode->i_mode, to)) {
65759+ error = -EACCES;
65760+ goto out_dput;
65761+ }
65762+
65763+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
65764+ old_path.dentry, old_path.mnt, to)) {
65765+ error = -EACCES;
65766+ goto out_dput;
65767+ }
65768+
65769 error = security_path_link(old_path.dentry, &new_path, new_dentry);
65770 if (error)
65771 goto out_dput;
65772 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
65773+ if (!error)
65774+ gr_handle_create(new_dentry, new_path.mnt);
65775 out_dput:
65776+ putname(to);
65777 done_path_create(&new_path, new_dentry);
65778 if (delegated_inode) {
65779 error = break_deleg_wait(&delegated_inode);
65780@@ -4348,6 +4530,20 @@ retry_deleg:
65781 if (new_dentry == trap)
65782 goto exit5;
65783
65784+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
65785+ /* use EXDEV error to cause 'mv' to switch to an alternative
65786+ * method for usability
65787+ */
65788+ error = -EXDEV;
65789+ goto exit5;
65790+ }
65791+
65792+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
65793+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
65794+ to, flags);
65795+ if (error)
65796+ goto exit5;
65797+
65798 error = security_path_rename(&oldnd.path, old_dentry,
65799 &newnd.path, new_dentry, flags);
65800 if (error)
65801@@ -4355,6 +4551,9 @@ retry_deleg:
65802 error = vfs_rename(old_dir->d_inode, old_dentry,
65803 new_dir->d_inode, new_dentry,
65804 &delegated_inode, flags);
65805+ if (!error)
65806+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
65807+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
65808 exit5:
65809 dput(new_dentry);
65810 exit4:
65811@@ -4411,14 +4610,24 @@ EXPORT_SYMBOL(vfs_whiteout);
65812
65813 int readlink_copy(char __user *buffer, int buflen, const char *link)
65814 {
65815+ char tmpbuf[64];
65816+ const char *newlink;
65817 int len = PTR_ERR(link);
65818+
65819 if (IS_ERR(link))
65820 goto out;
65821
65822 len = strlen(link);
65823 if (len > (unsigned) buflen)
65824 len = buflen;
65825- if (copy_to_user(buffer, link, len))
65826+
65827+ if (len < sizeof(tmpbuf)) {
65828+ memcpy(tmpbuf, link, len);
65829+ newlink = tmpbuf;
65830+ } else
65831+ newlink = link;
65832+
65833+ if (copy_to_user(buffer, newlink, len))
65834 len = -EFAULT;
65835 out:
65836 return len;
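
The readlink_copy() change above bounces link targets shorter than 64 bytes through a stack buffer before the user copy, so copy_to_user() reads from a fixed-size stack object rather than directly from kernel heap or inode data; this presumably keeps the copy friendly to PaX USERCOPY object checking. A minimal userspace sketch of the bounce-buffer pattern, with memcpy standing in for copy_to_user() and the 64-byte threshold taken from the hunk:

#include <stdio.h>
#include <string.h>

/* Illustrative analogue of readlink_copy(): targets shorter than 64
 * bytes are staged in a stack buffer, so the final copy always reads
 * from a fixed-size stack object. */
static int copy_link(char *dst, size_t buflen, const char *link)
{
    char tmpbuf[64];
    const char *src = link;
    size_t len = strlen(link);

    if (len > buflen)
        len = buflen;

    if (len < sizeof(tmpbuf)) {
        memcpy(tmpbuf, link, len);
        src = tmpbuf;              /* copy out of the stack object */
    }

    memcpy(dst, src, len);         /* stands in for copy_to_user() */
    return (int)len;
}

int main(void)
{
    char out[128];
    int n = copy_link(out, sizeof(out), "/tmp/target");
    printf("%.*s (%d bytes)\n", n, out, n);
    return 0;
}
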
65837diff --git a/fs/namespace.c b/fs/namespace.c
65838index 38ed1e1..8500e56 100644
65839--- a/fs/namespace.c
65840+++ b/fs/namespace.c
65841@@ -1480,6 +1480,9 @@ static int do_umount(struct mount *mnt, int flags)
65842 if (!(sb->s_flags & MS_RDONLY))
65843 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
65844 up_write(&sb->s_umount);
65845+
65846+ gr_log_remount(mnt->mnt_devname, retval);
65847+
65848 return retval;
65849 }
65850
65851@@ -1502,6 +1505,9 @@ static int do_umount(struct mount *mnt, int flags)
65852 }
65853 unlock_mount_hash();
65854 namespace_unlock();
65855+
65856+ gr_log_unmount(mnt->mnt_devname, retval);
65857+
65858 return retval;
65859 }
65860
65861@@ -1559,7 +1565,7 @@ static inline bool may_mount(void)
65862 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
65863 */
65864
65865-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
65866+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
65867 {
65868 struct path path;
65869 struct mount *mnt;
65870@@ -1604,7 +1610,7 @@ out:
65871 /*
65872 * The 2.0 compatible umount. No flags.
65873 */
65874-SYSCALL_DEFINE1(oldumount, char __user *, name)
65875+SYSCALL_DEFINE1(oldumount, const char __user *, name)
65876 {
65877 return sys_umount(name, 0);
65878 }
65879@@ -2670,6 +2676,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
65880 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
65881 MS_STRICTATIME);
65882
65883+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
65884+ retval = -EPERM;
65885+ goto dput_out;
65886+ }
65887+
65888+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
65889+ retval = -EPERM;
65890+ goto dput_out;
65891+ }
65892+
65893 if (flags & MS_REMOUNT)
65894 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
65895 data_page);
65896@@ -2683,7 +2699,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
65897 retval = do_new_mount(&path, type_page, flags, mnt_flags,
65898 dev_name, data_page);
65899 dput_out:
65900+ gr_log_mount(dev_name, &path, retval);
65901+
65902 path_put(&path);
65903+
65904 return retval;
65905 }
65906
65907@@ -2701,7 +2720,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
65908 * number incrementing at 10Ghz will take 12,427 years to wrap which
65909 * is effectively never, so we can ignore the possibility.
65910 */
65911-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
65912+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
65913
65914 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65915 {
65916@@ -2717,7 +2736,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65917 return ERR_PTR(ret);
65918 }
65919 new_ns->ns.ops = &mntns_operations;
65920- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
65921+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
65922 atomic_set(&new_ns->count, 1);
65923 new_ns->root = NULL;
65924 INIT_LIST_HEAD(&new_ns->list);
65925@@ -2727,7 +2746,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65926 return new_ns;
65927 }
65928
65929-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65930+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65931 struct user_namespace *user_ns, struct fs_struct *new_fs)
65932 {
65933 struct mnt_namespace *new_ns;
65934@@ -2848,8 +2867,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
65935 }
65936 EXPORT_SYMBOL(mount_subtree);
65937
65938-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
65939- char __user *, type, unsigned long, flags, void __user *, data)
65940+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
65941+ const char __user *, type, unsigned long, flags, void __user *, data)
65942 {
65943 int ret;
65944 char *kernel_type;
65945@@ -2955,6 +2974,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65946 if (error)
65947 goto out2;
65948
65949+ if (gr_handle_chroot_pivot()) {
65950+ error = -EPERM;
65951+ goto out2;
65952+ }
65953+
65954 get_fs_root(current->fs, &root);
65955 old_mp = lock_mount(&old);
65956 error = PTR_ERR(old_mp);
65957@@ -3235,7 +3259,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
65958 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
65959 return -EPERM;
65960
65961- if (fs->users != 1)
65962+ if (atomic_read(&fs->users) != 1)
65963 return -EINVAL;
65964
65965 get_mnt_ns(mnt_ns);
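
The mnt_ns_seq conversion above swaps atomic64_t for atomic64_unchecked_t. Under PaX REFCOUNT, ordinary atomics trap on overflow to catch reference-count wraps; the _unchecked variants opt a counter out of that instrumentation when wraparound is harmless, as the surrounding comment argues for this sequence number. A rough userspace sketch of the checked/unchecked split, assuming the checked add should abort on signed overflow:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* The checked add traps on signed overflow, as PaX REFCOUNT does for
 * reference counts; the unchecked add is plain wrapping arithmetic
 * for counters where wraparound is harmless. */
static int64_t add_return_checked(int64_t *v, int64_t i)
{
    int64_t r;
    if (__builtin_add_overflow(*v, i, &r))
        abort();                /* overflowing a refcount is a bug */
    *v = r;
    return r;
}

static int64_t add_return_unchecked(int64_t *v, int64_t i)
{
    *v = (int64_t)((uint64_t)*v + (uint64_t)i);  /* wrapping is fine */
    return *v;
}

int main(void)
{
    int64_t seq = 1, ref = 1;
    printf("seq=%lld\n", (long long)add_return_unchecked(&seq, 1));
    printf("ref=%lld\n", (long long)add_return_checked(&ref, 1));
    return 0;
}
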
65966diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
65967index 19ca95c..b28702c 100644
65968--- a/fs/nfs/callback_xdr.c
65969+++ b/fs/nfs/callback_xdr.c
65970@@ -51,7 +51,7 @@ struct callback_op {
65971 callback_decode_arg_t decode_args;
65972 callback_encode_res_t encode_res;
65973 long res_maxsize;
65974-};
65975+} __do_const;
65976
65977 static struct callback_op callback_ops[];
65978
65979diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
65980index d42dff6..ecbdf42 100644
65981--- a/fs/nfs/inode.c
65982+++ b/fs/nfs/inode.c
65983@@ -1270,16 +1270,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
65984 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
65985 }
65986
65987-static atomic_long_t nfs_attr_generation_counter;
65988+static atomic_long_unchecked_t nfs_attr_generation_counter;
65989
65990 static unsigned long nfs_read_attr_generation_counter(void)
65991 {
65992- return atomic_long_read(&nfs_attr_generation_counter);
65993+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
65994 }
65995
65996 unsigned long nfs_inc_attr_generation_counter(void)
65997 {
65998- return atomic_long_inc_return(&nfs_attr_generation_counter);
65999+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
66000 }
66001 EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
66002
66003diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
66004index 5416968..0942042 100644
66005--- a/fs/nfsd/nfs4proc.c
66006+++ b/fs/nfsd/nfs4proc.c
66007@@ -1496,7 +1496,7 @@ struct nfsd4_operation {
66008 nfsd4op_rsize op_rsize_bop;
66009 stateid_getter op_get_currentstateid;
66010 stateid_setter op_set_currentstateid;
66011-};
66012+} __do_const;
66013
66014 static struct nfsd4_operation nfsd4_ops[];
66015
66016diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
66017index 5b33ce1..c2a92aa 100644
66018--- a/fs/nfsd/nfs4xdr.c
66019+++ b/fs/nfsd/nfs4xdr.c
66020@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
66021
66022 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
66023
66024-static nfsd4_dec nfsd4_dec_ops[] = {
66025+static const nfsd4_dec nfsd4_dec_ops[] = {
66026 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
66027 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
66028 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
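
Constifying nfsd4_dec_ops[] (and tagging the operation structs __do_const, a PaX constify annotation) lets these dispatch tables live in read-only memory, so a stray or attacker-controlled kernel write cannot redirect their function pointers. A minimal sketch of a const dispatch table; the handler names are invented for illustration:

#include <stdio.h>

/* A const dispatch table ends up in .rodata, so its function
 * pointers cannot be overwritten at run time the way a writable
 * table could be. */
typedef int (*op_fn)(int);

static int op_access(int x) { return x + 1; }
static int op_close(int x)  { return x - 1; }

static const op_fn ops[] = {
    op_access,
    op_close,
};

int main(void)
{
    printf("%d %d\n", ops[0](41), ops[1](43));
    /* ops[0] = op_close;  would not compile: the table is const */
    return 0;
}
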
66029diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
66030index 46ec934..f384e41 100644
66031--- a/fs/nfsd/nfscache.c
66032+++ b/fs/nfsd/nfscache.c
66033@@ -541,7 +541,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66034 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
66035 u32 hash;
66036 struct nfsd_drc_bucket *b;
66037- int len;
66038+ long len;
66039 size_t bufsize = 0;
66040
66041 if (!rp)
66042@@ -550,11 +550,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66043 hash = nfsd_cache_hash(rp->c_xid);
66044 b = &drc_hashtbl[hash];
66045
66046- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
66047- len >>= 2;
66048+ if (statp) {
66049+ len = (char*)statp - (char*)resv->iov_base;
66050+ len = resv->iov_len - len;
66051+ len >>= 2;
66052+ }
66053
66054 /* Don't cache excessive amounts of data and XDR failures */
66055- if (!statp || len > (256 >> 2)) {
66056+ if (!statp || len > (256 >> 2) || len < 0) {
66057 nfsd_reply_cache_free(b, rp);
66058 return;
66059 }
66060@@ -562,7 +565,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
66061 switch (cachetype) {
66062 case RC_REPLSTAT:
66063 if (len != 1)
66064- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
66065+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
66066 rp->c_replstat = *statp;
66067 break;
66068 case RC_REPLBUFF:
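
The nfsd_cache_update() fix above widens len to long, computes it only when statp is non-NULL, and rejects negative values; with the old int arithmetic, a reply pointer that ended up before iov_base could underflow and slip past the size check. A sketch of the corrected check, with plain offsets standing in for the statp/iov_base pointer arithmetic (the right shift of a negative value mirrors the kernel's own arithmetic):

#include <stdio.h>

/* Keep the cached-reply length signed and reject both oversized and
 * negative values, instead of trusting an int that can underflow. */
static int cache_len_ok(long iov_len, long statp_off)
{
    long len = (iov_len - statp_off) >> 2;

    return len >= 0 && len <= (256 >> 2);
}

int main(void)
{
    printf("%d\n", cache_len_ok(64, 8));    /* 1: sane reply */
    printf("%d\n", cache_len_ok(64, 128));  /* 0: negative length */
    printf("%d\n", cache_len_ok(4096, 0));  /* 0: oversized */
    return 0;
}
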
66069diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
66070index 3685265..e77261e 100644
66071--- a/fs/nfsd/vfs.c
66072+++ b/fs/nfsd/vfs.c
66073@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
66074
66075 oldfs = get_fs();
66076 set_fs(KERNEL_DS);
66077- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
66078+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
66079 set_fs(oldfs);
66080 return nfsd_finish_read(file, count, host_err);
66081 }
66082@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
66083
66084 /* Write the data. */
66085 oldfs = get_fs(); set_fs(KERNEL_DS);
66086- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
66087+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
66088 set_fs(oldfs);
66089 if (host_err < 0)
66090 goto out_nfserr;
66091@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
66092 */
66093
66094 oldfs = get_fs(); set_fs(KERNEL_DS);
66095- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
66096+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
66097 set_fs(oldfs);
66098
66099 if (host_err < 0)
66100diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
66101index 52ccd34..7a6b202 100644
66102--- a/fs/nls/nls_base.c
66103+++ b/fs/nls/nls_base.c
66104@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
66105
66106 int __register_nls(struct nls_table *nls, struct module *owner)
66107 {
66108- struct nls_table ** tmp = &tables;
66109+ struct nls_table *tmp = tables;
66110
66111 if (nls->next)
66112 return -EBUSY;
66113
66114- nls->owner = owner;
66115+ pax_open_kernel();
66116+ *(void **)&nls->owner = owner;
66117+ pax_close_kernel();
66118 spin_lock(&nls_lock);
66119- while (*tmp) {
66120- if (nls == *tmp) {
66121+ while (tmp) {
66122+ if (nls == tmp) {
66123 spin_unlock(&nls_lock);
66124 return -EBUSY;
66125 }
66126- tmp = &(*tmp)->next;
66127+ tmp = tmp->next;
66128 }
66129- nls->next = tables;
66130+ pax_open_kernel();
66131+ *(struct nls_table **)&nls->next = tables;
66132+ pax_close_kernel();
66133 tables = nls;
66134 spin_unlock(&nls_lock);
66135 return 0;
66136@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
66137
66138 int unregister_nls(struct nls_table * nls)
66139 {
66140- struct nls_table ** tmp = &tables;
66141+ struct nls_table * const * tmp = &tables;
66142
66143 spin_lock(&nls_lock);
66144 while (*tmp) {
66145 if (nls == *tmp) {
66146- *tmp = nls->next;
66147+ pax_open_kernel();
66148+ *(struct nls_table **)tmp = nls->next;
66149+ pax_close_kernel();
66150 spin_unlock(&nls_lock);
66151 return 0;
66152 }
66153@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
66154 return -EINVAL;
66155 }
66156
66157-static struct nls_table *find_nls(char *charset)
66158+static struct nls_table *find_nls(const char *charset)
66159 {
66160 struct nls_table *nls;
66161 spin_lock(&nls_lock);
66162@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
66163 return nls;
66164 }
66165
66166-struct nls_table *load_nls(char *charset)
66167+struct nls_table *load_nls(const char *charset)
66168 {
66169 return try_then_request_module(find_nls(charset), "nls_%s", charset);
66170 }
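
The pax_open_kernel()/pax_close_kernel() pairs above briefly lift write protection so the registration code can update NLS table fields that are otherwise kept read-only. A userspace analogue of that pattern using mprotect(), under the assumption that the protected object occupies its own page:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Data normally mapped read-only is made writable only for the
 * duration of a legitimate update, then sealed again. */
int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;

    strcpy(table, "initial");
    mprotect(table, pagesz, PROT_READ);               /* seal it */

    mprotect(table, pagesz, PROT_READ | PROT_WRITE);  /* "open" */
    strcpy(table, "updated");                         /* the one writer */
    mprotect(table, pagesz, PROT_READ);               /* "close" */

    printf("%s\n", table);
    return 0;
}
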
66171diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
66172index 162b3f1..6076a7c 100644
66173--- a/fs/nls/nls_euc-jp.c
66174+++ b/fs/nls/nls_euc-jp.c
66175@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
66176 p_nls = load_nls("cp932");
66177
66178 if (p_nls) {
66179- table.charset2upper = p_nls->charset2upper;
66180- table.charset2lower = p_nls->charset2lower;
66181+ pax_open_kernel();
66182+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66183+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66184+ pax_close_kernel();
66185 return register_nls(&table);
66186 }
66187
66188diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
66189index a80a741..7b96e1b 100644
66190--- a/fs/nls/nls_koi8-ru.c
66191+++ b/fs/nls/nls_koi8-ru.c
66192@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
66193 p_nls = load_nls("koi8-u");
66194
66195 if (p_nls) {
66196- table.charset2upper = p_nls->charset2upper;
66197- table.charset2lower = p_nls->charset2lower;
66198+ pax_open_kernel();
66199+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
66200+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
66201+ pax_close_kernel();
66202 return register_nls(&table);
66203 }
66204
66205diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
66206index cf27550..6c70f29d 100644
66207--- a/fs/notify/fanotify/fanotify_user.c
66208+++ b/fs/notify/fanotify/fanotify_user.c
66209@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
66210
66211 fd = fanotify_event_metadata.fd;
66212 ret = -EFAULT;
66213- if (copy_to_user(buf, &fanotify_event_metadata,
66214- fanotify_event_metadata.event_len))
66215+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
66216+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
66217 goto out_close_fd;
66218
66219 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
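
The fanotify change above refuses to copy out an event whose claimed event_len exceeds the on-stack metadata struct, so a bad length fails cleanly instead of disclosing adjacent stack memory. A minimal sketch of the guard, with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

struct metadata {
    unsigned int event_len;     /* length claimed by the event */
    int fd;
};

/* Refuse to copy more bytes than the source object actually holds,
 * so a bad event_len cannot leak adjacent stack memory. */
static int copy_event(char *buf, const struct metadata *m)
{
    if (m->event_len > sizeof(*m))
        return -1;              /* would read past the struct */
    memcpy(buf, m, m->event_len);
    return 0;
}

int main(void)
{
    struct metadata m = { .event_len = sizeof(m), .fd = 3 };
    char buf[64];

    printf("%d\n", copy_event(buf, &m));   /* 0: ok */
    m.event_len = 4096;
    printf("%d\n", copy_event(buf, &m));   /* -1: rejected */
    return 0;
}
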
66220diff --git a/fs/notify/notification.c b/fs/notify/notification.c
66221index a95d8e0..a91a5fd 100644
66222--- a/fs/notify/notification.c
66223+++ b/fs/notify/notification.c
66224@@ -48,7 +48,7 @@
66225 #include <linux/fsnotify_backend.h>
66226 #include "fsnotify.h"
66227
66228-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66229+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66230
66231 /**
66232 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
66233@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
66234 */
66235 u32 fsnotify_get_cookie(void)
66236 {
66237- return atomic_inc_return(&fsnotify_sync_cookie);
66238+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
66239 }
66240 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
66241
66242diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
66243index 9e38daf..5727cae 100644
66244--- a/fs/ntfs/dir.c
66245+++ b/fs/ntfs/dir.c
66246@@ -1310,7 +1310,7 @@ find_next_index_buffer:
66247 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
66248 ~(s64)(ndir->itype.index.block_size - 1)));
66249 /* Bounds checks. */
66250- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66251+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
66252 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
66253 "inode 0x%lx or driver bug.", vdir->i_ino);
66254 goto err_out;
66255diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
66256index 1da9b2d..9cca092a 100644
66257--- a/fs/ntfs/file.c
66258+++ b/fs/ntfs/file.c
66259@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
66260 char *addr;
66261 size_t total = 0;
66262 unsigned len;
66263- int left;
66264+ unsigned left;
66265
66266 do {
66267 len = PAGE_CACHE_SIZE - ofs;
66268diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
66269index 9e1e112..241a52a 100644
66270--- a/fs/ntfs/super.c
66271+++ b/fs/ntfs/super.c
66272@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66273 if (!silent)
66274 ntfs_error(sb, "Primary boot sector is invalid.");
66275 } else if (!silent)
66276- ntfs_error(sb, read_err_str, "primary");
66277+ ntfs_error(sb, read_err_str, "%s", "primary");
66278 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
66279 if (bh_primary)
66280 brelse(bh_primary);
66281@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66282 goto hotfix_primary_boot_sector;
66283 brelse(bh_backup);
66284 } else if (!silent)
66285- ntfs_error(sb, read_err_str, "backup");
66286+ ntfs_error(sb, read_err_str, "%s", "backup");
66287 /* Try to read NT3.51- backup boot sector. */
66288 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
66289 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
66290@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
66291 "sector.");
66292 brelse(bh_backup);
66293 } else if (!silent)
66294- ntfs_error(sb, read_err_str, "backup");
66295+ ntfs_error(sb, read_err_str, "%s", "backup");
66296 /* We failed. Cleanup and return. */
66297 if (bh_primary)
66298 brelse(bh_primary);
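
The ntfs_error() changes above add an explicit "%s" so the string being reported is passed as an argument rather than interpolated directly. The general discipline, sketched below, is that variable text belongs behind a literal "%s" format and must never sit where it could be interpreted as conversions:

#include <stdio.h>

/* Variable or untrusted text is printed as data through a literal
 * "%s" format, never used as the format itself. */
static void log_msg(const char *text)
{
    /* printf(text);        unsafe: %n/%s inside text would be
     *                      interpreted as conversions */
    printf("%s\n", text);   /* safe: text is data, not format */
}

int main(void)
{
    log_msg("boot sector %n looks odd");  /* printed verbatim */
    return 0;
}
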
66299diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
66300index 0440134..d52c93a 100644
66301--- a/fs/ocfs2/localalloc.c
66302+++ b/fs/ocfs2/localalloc.c
66303@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
66304 goto bail;
66305 }
66306
66307- atomic_inc(&osb->alloc_stats.moves);
66308+ atomic_inc_unchecked(&osb->alloc_stats.moves);
66309
66310 bail:
66311 if (handle)
66312diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
66313index 460c6c3..b4ef513 100644
66314--- a/fs/ocfs2/ocfs2.h
66315+++ b/fs/ocfs2/ocfs2.h
66316@@ -247,11 +247,11 @@ enum ocfs2_vol_state
66317
66318 struct ocfs2_alloc_stats
66319 {
66320- atomic_t moves;
66321- atomic_t local_data;
66322- atomic_t bitmap_data;
66323- atomic_t bg_allocs;
66324- atomic_t bg_extends;
66325+ atomic_unchecked_t moves;
66326+ atomic_unchecked_t local_data;
66327+ atomic_unchecked_t bitmap_data;
66328+ atomic_unchecked_t bg_allocs;
66329+ atomic_unchecked_t bg_extends;
66330 };
66331
66332 enum ocfs2_local_alloc_state
66333diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
66334index ee541f9..df3a500 100644
66335--- a/fs/ocfs2/refcounttree.c
66336+++ b/fs/ocfs2/refcounttree.c
66337@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
66338 error = posix_acl_create(dir, &mode, &default_acl, &acl);
66339 if (error) {
66340 mlog_errno(error);
66341- goto out;
66342+ return error;
66343 }
66344
66345 error = ocfs2_create_inode_in_orphan(dir, mode,
66346diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
66347index 0cb889a..6a26b24 100644
66348--- a/fs/ocfs2/suballoc.c
66349+++ b/fs/ocfs2/suballoc.c
66350@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
66351 mlog_errno(status);
66352 goto bail;
66353 }
66354- atomic_inc(&osb->alloc_stats.bg_extends);
66355+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
66356
66357 /* You should never ask for this much metadata */
66358 BUG_ON(bits_wanted >
66359@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
66360 mlog_errno(status);
66361 goto bail;
66362 }
66363- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66364+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66365
66366 *suballoc_loc = res.sr_bg_blkno;
66367 *suballoc_bit_start = res.sr_bit_offset;
66368@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
66369 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
66370 res->sr_bits);
66371
66372- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66373+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66374
66375 BUG_ON(res->sr_bits != 1);
66376
66377@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
66378 mlog_errno(status);
66379 goto bail;
66380 }
66381- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66382+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
66383
66384 BUG_ON(res.sr_bits != 1);
66385
66386@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66387 cluster_start,
66388 num_clusters);
66389 if (!status)
66390- atomic_inc(&osb->alloc_stats.local_data);
66391+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
66392 } else {
66393 if (min_clusters > (osb->bitmap_cpg - 1)) {
66394 /* The only paths asking for contiguousness
66395@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
66396 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
66397 res.sr_bg_blkno,
66398 res.sr_bit_offset);
66399- atomic_inc(&osb->alloc_stats.bitmap_data);
66400+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
66401 *num_clusters = res.sr_bits;
66402 }
66403 }
66404diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
66405index 2667518..24bcf79 100644
66406--- a/fs/ocfs2/super.c
66407+++ b/fs/ocfs2/super.c
66408@@ -308,11 +308,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
66409 "%10s => GlobalAllocs: %d LocalAllocs: %d "
66410 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
66411 "Stats",
66412- atomic_read(&osb->alloc_stats.bitmap_data),
66413- atomic_read(&osb->alloc_stats.local_data),
66414- atomic_read(&osb->alloc_stats.bg_allocs),
66415- atomic_read(&osb->alloc_stats.moves),
66416- atomic_read(&osb->alloc_stats.bg_extends));
66417+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
66418+ atomic_read_unchecked(&osb->alloc_stats.local_data),
66419+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
66420+ atomic_read_unchecked(&osb->alloc_stats.moves),
66421+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
66422
66423 out += snprintf(buf + out, len - out,
66424 "%10s => State: %u Descriptor: %llu Size: %u bits "
66425@@ -2093,11 +2093,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
66426
66427 mutex_init(&osb->system_file_mutex);
66428
66429- atomic_set(&osb->alloc_stats.moves, 0);
66430- atomic_set(&osb->alloc_stats.local_data, 0);
66431- atomic_set(&osb->alloc_stats.bitmap_data, 0);
66432- atomic_set(&osb->alloc_stats.bg_allocs, 0);
66433- atomic_set(&osb->alloc_stats.bg_extends, 0);
66434+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
66435+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
66436+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
66437+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
66438+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
66439
66440 /* Copy the blockcheck stats from the superblock probe */
66441 osb->osb_ecc_stats = *stats;
66442diff --git a/fs/open.c b/fs/open.c
66443index 44a3be1..5e97aa1 100644
66444--- a/fs/open.c
66445+++ b/fs/open.c
66446@@ -32,6 +32,8 @@
66447 #include <linux/dnotify.h>
66448 #include <linux/compat.h>
66449
66450+#define CREATE_TRACE_POINTS
66451+#include <trace/events/fs.h>
66452 #include "internal.h"
66453
66454 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
66455@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
66456 error = locks_verify_truncate(inode, NULL, length);
66457 if (!error)
66458 error = security_path_truncate(path);
66459+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
66460+ error = -EACCES;
66461 if (!error)
66462 error = do_truncate(path->dentry, length, 0, NULL);
66463
66464@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
66465 error = locks_verify_truncate(inode, f.file, length);
66466 if (!error)
66467 error = security_path_truncate(&f.file->f_path);
66468+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
66469+ error = -EACCES;
66470 if (!error)
66471 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
66472 sb_end_write(inode->i_sb);
66473@@ -392,6 +398,9 @@ retry:
66474 if (__mnt_is_readonly(path.mnt))
66475 res = -EROFS;
66476
66477+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
66478+ res = -EACCES;
66479+
66480 out_path_release:
66481 path_put(&path);
66482 if (retry_estale(res, lookup_flags)) {
66483@@ -423,6 +432,8 @@ retry:
66484 if (error)
66485 goto dput_and_out;
66486
66487+ gr_log_chdir(path.dentry, path.mnt);
66488+
66489 set_fs_pwd(current->fs, &path);
66490
66491 dput_and_out:
66492@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
66493 goto out_putf;
66494
66495 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
66496+
66497+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
66498+ error = -EPERM;
66499+
66500+ if (!error)
66501+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
66502+
66503 if (!error)
66504 set_fs_pwd(current->fs, &f.file->f_path);
66505 out_putf:
66506@@ -481,7 +499,13 @@ retry:
66507 if (error)
66508 goto dput_and_out;
66509
66510+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
66511+ goto dput_and_out;
66512+
66513 set_fs_root(current->fs, &path);
66514+
66515+ gr_handle_chroot_chdir(&path);
66516+
66517 error = 0;
66518 dput_and_out:
66519 path_put(&path);
66520@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
66521 return error;
66522 retry_deleg:
66523 mutex_lock(&inode->i_mutex);
66524+
66525+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
66526+ error = -EACCES;
66527+ goto out_unlock;
66528+ }
66529+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
66530+ error = -EACCES;
66531+ goto out_unlock;
66532+ }
66533+
66534 error = security_path_chmod(path, mode);
66535 if (error)
66536 goto out_unlock;
66537@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
66538 uid = make_kuid(current_user_ns(), user);
66539 gid = make_kgid(current_user_ns(), group);
66540
66541+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
66542+ return -EACCES;
66543+
66544 retry_deleg:
66545 newattrs.ia_valid = ATTR_CTIME;
66546 if (user != (uid_t) -1) {
66547@@ -1017,6 +1054,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
66548 } else {
66549 fsnotify_open(f);
66550 fd_install(fd, f);
66551+ trace_do_sys_open(tmp->name, flags, mode);
66552 }
66553 }
66554 putname(tmp);
66555diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
66556index 5f0d199..13b74b9 100644
66557--- a/fs/overlayfs/super.c
66558+++ b/fs/overlayfs/super.c
66559@@ -172,7 +172,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
66560 {
66561 struct ovl_entry *oe = dentry->d_fsdata;
66562
66563- *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
66564+ *path = oe->numlower ? oe->lowerstack[0] : (struct path) { .dentry = NULL, .mnt = NULL };
66565 }
66566
66567 int ovl_want_write(struct dentry *dentry)
66568@@ -816,8 +816,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
66569
66570 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
66571 {
66572- struct path upperpath = { NULL, NULL };
66573- struct path workpath = { NULL, NULL };
66574+ struct path upperpath = { .dentry = NULL, .mnt = NULL };
66575+ struct path workpath = { .dentry = NULL, .mnt = NULL };
66576 struct dentry *root_dentry;
66577 struct ovl_entry *oe;
66578 struct ovl_fs *ufs;
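
The ovl_path_lower()/ovl_fill_super() changes above replace positional struct path initializers with designated ones; naming the members keeps the initializers correct even if the structure's field order changes, for example under structure layout randomization. A small sketch of the difference:

#include <stdio.h>

/* Designated initializers name the members they set, so the code
 * survives a reordering of the structure's fields; the positional
 * form silently depends on declaration order. */
struct path {
    void *dentry;
    void *mnt;
};

int main(void)
{
    struct path positional = { NULL, NULL };   /* order-dependent */
    struct path designated = { .dentry = NULL, .mnt = NULL };

    printf("%p %p\n", designated.dentry, positional.mnt);
    return 0;
}
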
66579diff --git a/fs/pipe.c b/fs/pipe.c
66580index 21981e5..2c0bffb 100644
66581--- a/fs/pipe.c
66582+++ b/fs/pipe.c
66583@@ -37,7 +37,7 @@ unsigned int pipe_max_size = 1048576;
66584 /*
66585 * Minimum pipe size, as required by POSIX
66586 */
66587-unsigned int pipe_min_size = PAGE_SIZE;
66588+unsigned int pipe_min_size __read_only = PAGE_SIZE;
66589
66590 /*
66591 * We use a start+len construction, which provides full use of the
66592@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
66593
66594 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
66595 {
66596- if (pipe->files)
66597+ if (atomic_read(&pipe->files))
66598 mutex_lock_nested(&pipe->mutex, subclass);
66599 }
66600
66601@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
66602
66603 void pipe_unlock(struct pipe_inode_info *pipe)
66604 {
66605- if (pipe->files)
66606+ if (atomic_read(&pipe->files))
66607 mutex_unlock(&pipe->mutex);
66608 }
66609 EXPORT_SYMBOL(pipe_unlock);
66610@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
66611 }
66612 if (bufs) /* More to do? */
66613 continue;
66614- if (!pipe->writers)
66615+ if (!atomic_read(&pipe->writers))
66616 break;
66617- if (!pipe->waiting_writers) {
66618+ if (!atomic_read(&pipe->waiting_writers)) {
66619 /* syscall merging: Usually we must not sleep
66620 * if O_NONBLOCK is set, or if we got some data.
66621 * But if a writer sleeps in kernel space, then
66622@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66623
66624 __pipe_lock(pipe);
66625
66626- if (!pipe->readers) {
66627+ if (!atomic_read(&pipe->readers)) {
66628 send_sig(SIGPIPE, current, 0);
66629 ret = -EPIPE;
66630 goto out;
66631@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66632 for (;;) {
66633 int bufs;
66634
66635- if (!pipe->readers) {
66636+ if (!atomic_read(&pipe->readers)) {
66637 send_sig(SIGPIPE, current, 0);
66638 if (!ret)
66639 ret = -EPIPE;
66640@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66641 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66642 do_wakeup = 0;
66643 }
66644- pipe->waiting_writers++;
66645+ atomic_inc(&pipe->waiting_writers);
66646 pipe_wait(pipe);
66647- pipe->waiting_writers--;
66648+ atomic_dec(&pipe->waiting_writers);
66649 }
66650 out:
66651 __pipe_unlock(pipe);
66652@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66653 mask = 0;
66654 if (filp->f_mode & FMODE_READ) {
66655 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
66656- if (!pipe->writers && filp->f_version != pipe->w_counter)
66657+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
66658 mask |= POLLHUP;
66659 }
66660
66661@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66662 * Most Unices do not set POLLERR for FIFOs but on Linux they
66663 * behave exactly like pipes for poll().
66664 */
66665- if (!pipe->readers)
66666+ if (!atomic_read(&pipe->readers))
66667 mask |= POLLERR;
66668 }
66669
66670@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
66671 int kill = 0;
66672
66673 spin_lock(&inode->i_lock);
66674- if (!--pipe->files) {
66675+ if (atomic_dec_and_test(&pipe->files)) {
66676 inode->i_pipe = NULL;
66677 kill = 1;
66678 }
66679@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
66680
66681 __pipe_lock(pipe);
66682 if (file->f_mode & FMODE_READ)
66683- pipe->readers--;
66684+ atomic_dec(&pipe->readers);
66685 if (file->f_mode & FMODE_WRITE)
66686- pipe->writers--;
66687+ atomic_dec(&pipe->writers);
66688
66689- if (pipe->readers || pipe->writers) {
66690+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
66691 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
66692 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66693 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
66694@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
66695 kfree(pipe);
66696 }
66697
66698-static struct vfsmount *pipe_mnt __read_mostly;
66699+struct vfsmount *pipe_mnt __read_mostly;
66700
66701 /*
66702 * pipefs_dname() is called from d_path().
66703@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
66704 goto fail_iput;
66705
66706 inode->i_pipe = pipe;
66707- pipe->files = 2;
66708- pipe->readers = pipe->writers = 1;
66709+ atomic_set(&pipe->files, 2);
66710+ atomic_set(&pipe->readers, 1);
66711+ atomic_set(&pipe->writers, 1);
66712 inode->i_fop = &pipefifo_fops;
66713
66714 /*
66715@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
66716 spin_lock(&inode->i_lock);
66717 if (inode->i_pipe) {
66718 pipe = inode->i_pipe;
66719- pipe->files++;
66720+ atomic_inc(&pipe->files);
66721 spin_unlock(&inode->i_lock);
66722 } else {
66723 spin_unlock(&inode->i_lock);
66724 pipe = alloc_pipe_info();
66725 if (!pipe)
66726 return -ENOMEM;
66727- pipe->files = 1;
66728+ atomic_set(&pipe->files, 1);
66729 spin_lock(&inode->i_lock);
66730 if (unlikely(inode->i_pipe)) {
66731- inode->i_pipe->files++;
66732+ atomic_inc(&inode->i_pipe->files);
66733 spin_unlock(&inode->i_lock);
66734 free_pipe_info(pipe);
66735 pipe = inode->i_pipe;
66736@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
66737 * opened, even when there is no process writing the FIFO.
66738 */
66739 pipe->r_counter++;
66740- if (pipe->readers++ == 0)
66741+ if (atomic_inc_return(&pipe->readers) == 1)
66742 wake_up_partner(pipe);
66743
66744- if (!is_pipe && !pipe->writers) {
66745+ if (!is_pipe && !atomic_read(&pipe->writers)) {
66746 if ((filp->f_flags & O_NONBLOCK)) {
66747 /* suppress POLLHUP until we have
66748 * seen a writer */
66749@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
66750 * errno=ENXIO when there is no process reading the FIFO.
66751 */
66752 ret = -ENXIO;
66753- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
66754+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
66755 goto err;
66756
66757 pipe->w_counter++;
66758- if (!pipe->writers++)
66759+ if (atomic_inc_return(&pipe->writers) == 1)
66760 wake_up_partner(pipe);
66761
66762- if (!is_pipe && !pipe->readers) {
66763+ if (!is_pipe && !atomic_read(&pipe->readers)) {
66764 if (wait_for_partner(pipe, &pipe->r_counter))
66765 goto err_wr;
66766 }
66767@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
66768 * the process can at least talk to itself.
66769 */
66770
66771- pipe->readers++;
66772- pipe->writers++;
66773+ atomic_inc(&pipe->readers);
66774+ atomic_inc(&pipe->writers);
66775 pipe->r_counter++;
66776 pipe->w_counter++;
66777- if (pipe->readers == 1 || pipe->writers == 1)
66778+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
66779 wake_up_partner(pipe);
66780 break;
66781
66782@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
66783 return 0;
66784
66785 err_rd:
66786- if (!--pipe->readers)
66787+ if (atomic_dec_and_test(&pipe->readers))
66788 wake_up_interruptible(&pipe->wait);
66789 ret = -ERESTARTSYS;
66790 goto err;
66791
66792 err_wr:
66793- if (!--pipe->writers)
66794+ if (atomic_dec_and_test(&pipe->writers))
66795 wake_up_interruptible(&pipe->wait);
66796 ret = -ERESTARTSYS;
66797 goto err;
66798@@ -1010,7 +1011,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
66799 * Currently we rely on the pipe array holding a power-of-2 number
66800 * of pages.
66801 */
66802-static inline unsigned int round_pipe_size(unsigned int size)
66803+static inline unsigned long round_pipe_size(unsigned long size)
66804 {
66805 unsigned long nr_pages;
66806
66807@@ -1058,13 +1059,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
66808
66809 switch (cmd) {
66810 case F_SETPIPE_SZ: {
66811- unsigned int size, nr_pages;
66812+ unsigned long size, nr_pages;
66813+
66814+ ret = -EINVAL;
66815+ if (arg < pipe_min_size)
66816+ goto out;
66817
66818 size = round_pipe_size(arg);
66819 nr_pages = size >> PAGE_SHIFT;
66820
66821- ret = -EINVAL;
66822- if (!nr_pages)
66823+ if (size < pipe_min_size)
66824 goto out;
66825
66826 if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
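
The pipe.c hunks above convert the plain int files/readers/writers/waiting_writers counters to atomic_t, rewriting the old post-increment and pre-decrement idioms as atomic read-modify-write operations: x++ == 0 becomes atomic_inc_return(&x) == 1, and !--x becomes atomic_dec_and_test(&x). A C11 analogue of those two idioms:

#include <stdatomic.h>
#include <stdio.h>

/* Post-increment and pre-decrement tests on a plain int become
 * atomic RMW operations with equivalent return-value idioms. */
static atomic_int readers;

static int first_reader_opened(void)
{
    /* was: (readers++ == 0)  ->  atomic_inc_return(&readers) == 1 */
    return atomic_fetch_add(&readers, 1) + 1 == 1;
}

static int last_reader_closed(void)
{
    /* was: (!--readers)  ->  atomic_dec_and_test(&readers) */
    return atomic_fetch_sub(&readers, 1) - 1 == 0;
}

int main(void)
{
    printf("first=%d\n", first_reader_opened());  /* 1 */
    printf("last=%d\n", last_reader_closed());    /* 1 */
    return 0;
}
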
66827diff --git a/fs/posix_acl.c b/fs/posix_acl.c
66828index 3a48bb7..403067b 100644
66829--- a/fs/posix_acl.c
66830+++ b/fs/posix_acl.c
66831@@ -20,6 +20,7 @@
66832 #include <linux/xattr.h>
66833 #include <linux/export.h>
66834 #include <linux/user_namespace.h>
66835+#include <linux/grsecurity.h>
66836
66837 struct posix_acl **acl_by_type(struct inode *inode, int type)
66838 {
66839@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
66840 }
66841 }
66842 if (mode_p)
66843- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66844+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66845 return not_equiv;
66846 }
66847 EXPORT_SYMBOL(posix_acl_equiv_mode);
66848@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
66849 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
66850 }
66851
66852- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66853+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66854 return not_equiv;
66855 }
66856
66857@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
66858 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
66859 int err = -ENOMEM;
66860 if (clone) {
66861+ *mode_p &= ~gr_acl_umask();
66862+
66863 err = posix_acl_create_masq(clone, mode_p);
66864 if (err < 0) {
66865 posix_acl_release(clone);
66866@@ -663,11 +666,12 @@ struct posix_acl *
66867 posix_acl_from_xattr(struct user_namespace *user_ns,
66868 const void *value, size_t size)
66869 {
66870- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
66871- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
66872+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
66873+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
66874 int count;
66875 struct posix_acl *acl;
66876 struct posix_acl_entry *acl_e;
66877+ umode_t umask = gr_acl_umask();
66878
66879 if (!value)
66880 return NULL;
66881@@ -693,12 +697,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66882
66883 switch(acl_e->e_tag) {
66884 case ACL_USER_OBJ:
66885+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66886+ break;
66887 case ACL_GROUP_OBJ:
66888 case ACL_MASK:
66889+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66890+ break;
66891 case ACL_OTHER:
66892+ acl_e->e_perm &= ~(umask & S_IRWXO);
66893 break;
66894
66895 case ACL_USER:
66896+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66897 acl_e->e_uid =
66898 make_kuid(user_ns,
66899 le32_to_cpu(entry->e_id));
66900@@ -706,6 +716,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66901 goto fail;
66902 break;
66903 case ACL_GROUP:
66904+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66905 acl_e->e_gid =
66906 make_kgid(user_ns,
66907 le32_to_cpu(entry->e_id));
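
The posix_acl_from_xattr() changes above fold a policy umask from gr_acl_umask() into each ACL entry; since ACL entry permissions live in the low three bits, the user, group, and other classes of the umask are shifted down by 6, 3, and 0 bits before being cleared. A worked sketch of that masking, with an arbitrary example umask:

#include <stdio.h>
#include <sys/stat.h>

/* ACL entry permissions occupy the low three bits (rwx), so each
 * class of the umask is shifted into that range before being cleared
 * from the entry. The umask value here is arbitrary. */
int main(void)
{
    unsigned short perm = 07;        /* rwx on some ACL entry */
    mode_t umask_policy = 0022;      /* deny group/other write */

    unsigned u = perm & ~((umask_policy & S_IRWXU) >> 6);  /* ACL_USER* */
    unsigned g = perm & ~((umask_policy & S_IRWXG) >> 3);  /* ACL_GROUP* */
    unsigned o = perm & ~(umask_policy & S_IRWXO);         /* ACL_OTHER */

    printf("user %o group %o other %o\n", u, g, o);        /* 7 5 5 */
    return 0;
}
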
66908diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
66909index 2183fcf..3c32a98 100644
66910--- a/fs/proc/Kconfig
66911+++ b/fs/proc/Kconfig
66912@@ -30,7 +30,7 @@ config PROC_FS
66913
66914 config PROC_KCORE
66915 bool "/proc/kcore support" if !ARM
66916- depends on PROC_FS && MMU
66917+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
66918 help
66919 Provides a virtual ELF core file of the live kernel. This can
66920 be read with gdb and other ELF tools. No modifications can be
66921@@ -38,8 +38,8 @@ config PROC_KCORE
66922
66923 config PROC_VMCORE
66924 bool "/proc/vmcore support"
66925- depends on PROC_FS && CRASH_DUMP
66926- default y
66927+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
66928+ default n
66929 help
66930 Exports the dump image of crashed kernel in ELF format.
66931
66932@@ -63,8 +63,8 @@ config PROC_SYSCTL
66933 limited in memory.
66934
66935 config PROC_PAGE_MONITOR
66936- default y
66937- depends on PROC_FS && MMU
66938+ default n
66939+ depends on PROC_FS && MMU && !GRKERNSEC
66940 bool "Enable /proc page monitoring" if EXPERT
66941 help
66942 Various /proc files exist to monitor process memory utilization:
66943diff --git a/fs/proc/array.c b/fs/proc/array.c
66944index 1295a00..4c91a6b 100644
66945--- a/fs/proc/array.c
66946+++ b/fs/proc/array.c
66947@@ -60,6 +60,7 @@
66948 #include <linux/tty.h>
66949 #include <linux/string.h>
66950 #include <linux/mman.h>
66951+#include <linux/grsecurity.h>
66952 #include <linux/proc_fs.h>
66953 #include <linux/ioport.h>
66954 #include <linux/uaccess.h>
66955@@ -322,6 +323,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
66956 cpumask_pr_args(&task->cpus_allowed));
66957 }
66958
66959+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66960+static inline void task_pax(struct seq_file *m, struct task_struct *p)
66961+{
66962+ if (p->mm)
66963+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
66964+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
66965+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
66966+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
66967+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
66968+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
66969+ else
66970+ seq_printf(m, "PaX:\t-----\n");
66971+}
66972+#endif
66973+
66974 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66975 struct pid *pid, struct task_struct *task)
66976 {
66977@@ -340,9 +356,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66978 task_cpus_allowed(m, task);
66979 cpuset_task_status_allowed(m, task);
66980 task_context_switch_counts(m, task);
66981+
66982+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66983+ task_pax(m, task);
66984+#endif
66985+
66986+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
66987+ task_grsec_rbac(m, task);
66988+#endif
66989+
66990 return 0;
66991 }
66992
66993+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66994+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66995+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66996+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66997+#endif
66998+
66999 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67000 struct pid *pid, struct task_struct *task, int whole)
67001 {
67002@@ -364,6 +395,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67003 char tcomm[sizeof(task->comm)];
67004 unsigned long flags;
67005
67006+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67007+ if (current->exec_id != m->exec_id) {
67008+ gr_log_badprocpid("stat");
67009+ return 0;
67010+ }
67011+#endif
67012+
67013 state = *get_task_state(task);
67014 vsize = eip = esp = 0;
67015 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67016@@ -434,6 +472,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67017 gtime = task_gtime(task);
67018 }
67019
67020+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67021+ if (PAX_RAND_FLAGS(mm)) {
67022+ eip = 0;
67023+ esp = 0;
67024+ wchan = 0;
67025+ }
67026+#endif
67027+#ifdef CONFIG_GRKERNSEC_HIDESYM
67028+ wchan = 0;
67029+ eip = 0;
67030+ esp = 0;
67031+#endif
67032+
67033 /* scale priority and nice values from timeslices to -20..20 */
67034 /* to make it look like a "normal" Unix priority/nice value */
67035 priority = task_prio(task);
67036@@ -465,9 +516,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67037 seq_put_decimal_ull(m, ' ', vsize);
67038 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
67039 seq_put_decimal_ull(m, ' ', rsslim);
67040+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67041+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
67042+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
67043+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
67044+#else
67045 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
67046 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
67047 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
67048+#endif
67049 seq_put_decimal_ull(m, ' ', esp);
67050 seq_put_decimal_ull(m, ' ', eip);
67051 /* The signal information here is obsolete.
67052@@ -489,7 +546,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
67053 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
67054 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
67055
67056- if (mm && permitted) {
67057+ if (mm && permitted
67058+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67059+ && !PAX_RAND_FLAGS(mm)
67060+#endif
67061+ ) {
67062 seq_put_decimal_ull(m, ' ', mm->start_data);
67063 seq_put_decimal_ull(m, ' ', mm->end_data);
67064 seq_put_decimal_ull(m, ' ', mm->start_brk);
67065@@ -527,8 +588,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
67066 struct pid *pid, struct task_struct *task)
67067 {
67068 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
67069- struct mm_struct *mm = get_task_mm(task);
67070+ struct mm_struct *mm;
67071
67072+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67073+ if (current->exec_id != m->exec_id) {
67074+ gr_log_badprocpid("statm");
67075+ return 0;
67076+ }
67077+#endif
67078+ mm = get_task_mm(task);
67079 if (mm) {
67080 size = task_statm(mm, &shared, &text, &data, &resident);
67081 mmput(mm);
67082@@ -551,6 +619,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
67083 return 0;
67084 }
67085
67086+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67087+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
67088+{
67089+ unsigned long flags;
67090+ u32 curr_ip = 0;
67091+
67092+ if (lock_task_sighand(task, &flags)) {
67093+ curr_ip = task->signal->curr_ip;
67094+ unlock_task_sighand(task, &flags);
67095+ }
67096+ return seq_printf(m, "%pI4\n", &curr_ip);
67097+}
67098+#endif
67099+
67100 #ifdef CONFIG_CHECKPOINT_RESTORE
67101 static struct pid *
67102 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
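
The exec_id checks above stamp a grsecurity-added per-exec generation counter into the seq_file at open time and return nothing once the task has execve()'d, so a /proc/pid file descriptor opened before a privilege-changing exec cannot read post-exec data. A small sketch of the generation-stamp pattern; the structures here are illustrative only:

#include <stdio.h>

/* A reader records the generation counter at open time and refuses
 * to serve data once the generation has moved on. */
struct task   { unsigned long long exec_id; };
struct reader { unsigned long long opened_exec_id; };

static int do_read(const struct reader *r, const struct task *t)
{
    if (r->opened_exec_id != t->exec_id)
        return 0;               /* stale fd: pretend there is no data */
    return 1;
}

int main(void)
{
    struct task t = { .exec_id = 1 };
    struct reader r = { .opened_exec_id = t.exec_id };

    printf("%d\n", do_read(&r, &t));  /* 1: same image */
    t.exec_id++;                      /* task execve()'d */
    printf("%d\n", do_read(&r, &t));  /* 0: rejected */
    return 0;
}
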
67103diff --git a/fs/proc/base.c b/fs/proc/base.c
67104index 3f3d7ae..68de109 100644
67105--- a/fs/proc/base.c
67106+++ b/fs/proc/base.c
67107@@ -113,6 +113,14 @@ struct pid_entry {
67108 union proc_op op;
67109 };
67110
67111+struct getdents_callback {
67112+ struct linux_dirent __user * current_dir;
67113+ struct linux_dirent __user * previous;
67114+ struct file * file;
67115+ int count;
67116+ int error;
67117+};
67118+
67119 #define NOD(NAME, MODE, IOP, FOP, OP) { \
67120 .name = (NAME), \
67121 .len = sizeof(NAME) - 1, \
67122@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
67123 return 0;
67124 }
67125
67126+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67127+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67128+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67129+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67130+#endif
67131+
67132 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
67133 struct pid *pid, struct task_struct *task)
67134 {
67135 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
67136 if (mm && !IS_ERR(mm)) {
67137 unsigned int nwords = 0;
67138+
67139+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67140+ /* allow if we're currently ptracing this task */
67141+ if (PAX_RAND_FLAGS(mm) &&
67142+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
67143+ mmput(mm);
67144+ return 0;
67145+ }
67146+#endif
67147+
67148 do {
67149 nwords += 2;
67150 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
67151@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
67152 }
67153
67154
67155-#ifdef CONFIG_KALLSYMS
67156+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67157 /*
67158 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
67159 * Returns the resolved symbol. If that fails, simply return the address.
67160@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
67161 mutex_unlock(&task->signal->cred_guard_mutex);
67162 }
67163
67164-#ifdef CONFIG_STACKTRACE
67165+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67166
67167 #define MAX_STACK_TRACE_DEPTH 64
67168
67169@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
67170 return 0;
67171 }
67172
67173-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67174+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67175 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
67176 struct pid *pid, struct task_struct *task)
67177 {
67178@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
67179 /************************************************************************/
67180
67181 /* permission checks */
67182-static int proc_fd_access_allowed(struct inode *inode)
67183+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
67184 {
67185 struct task_struct *task;
67186 int allowed = 0;
67187@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
67188 */
67189 task = get_proc_task(inode);
67190 if (task) {
67191- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67192+ if (log)
67193+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
67194+ else
67195+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
67196 put_task_struct(task);
67197 }
67198 return allowed;
67199@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
67200 struct task_struct *task,
67201 int hide_pid_min)
67202 {
67203+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67204+ return false;
67205+
67206+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67207+ rcu_read_lock();
67208+ {
67209+ const struct cred *tmpcred = current_cred();
67210+ const struct cred *cred = __task_cred(task);
67211+
67212+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
67213+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67214+ || in_group_p(grsec_proc_gid)
67215+#endif
67216+ ) {
67217+ rcu_read_unlock();
67218+ return true;
67219+ }
67220+ }
67221+ rcu_read_unlock();
67222+
67223+ if (!pid->hide_pid)
67224+ return false;
67225+#endif
67226+
67227 if (pid->hide_pid < hide_pid_min)
67228 return true;
67229 if (in_group_p(pid->pid_gid))
67230 return true;
67231+
67232 return ptrace_may_access(task, PTRACE_MODE_READ);
67233 }
67234
67235@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
67236 put_task_struct(task);
67237
67238 if (!has_perms) {
67239+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67240+ {
67241+#else
67242 if (pid->hide_pid == 2) {
67243+#endif
67244 /*
67245 * Let's make getdents(), stat(), and open()
67246 * consistent with each other. If a process
67247@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
67248
67249 if (task) {
67250 mm = mm_access(task, mode);
67251+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
67252+ mmput(mm);
67253+ mm = ERR_PTR(-EPERM);
67254+ }
67255 put_task_struct(task);
67256
67257 if (!IS_ERR_OR_NULL(mm)) {
67258@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
67259 return PTR_ERR(mm);
67260
67261 file->private_data = mm;
67262+
67263+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67264+ file->f_version = current->exec_id;
67265+#endif
67266+
67267 return 0;
67268 }
67269
67270@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67271 ssize_t copied;
67272 char *page;
67273
67274+#ifdef CONFIG_GRKERNSEC
67275+ if (write)
67276+ return -EPERM;
67277+#endif
67278+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67279+ if (file->f_version != current->exec_id) {
67280+ gr_log_badprocpid("mem");
67281+ return 0;
67282+ }
67283+#endif
67284+
67285 if (!mm)
67286 return 0;
67287
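
The two additions above make mem_rw() reject every write with -EPERM under CONFIG_GRKERNSEC and, with PROC_MEMMAP, return an empty read once the opener's recorded exec_id no longer matches current's (logging the mismatch via gr_log_badprocpid()). A sketch of the surviving read path, aimed at the process's own memory (assumes Linux x86-64; mem_read.c is an illustrative name):

/* mem_read.c - read this process's memory via /proc/self/mem, the file
 * mem_rw() backs; a write attempt would now fail with EPERM. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static const char secret[] = "hello from .rodata";

int main(void)
{
	char buf[sizeof(secret)] = { 0 };
	int fd = open("/proc/self/mem", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the file offset is the virtual address to read */
	if (pread(fd, buf, sizeof(buf) - 1, (off_t)(uintptr_t)secret) < 0) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("read back: %s\n", buf);
	close(fd);
	return 0;
}
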
67288@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
67289 goto free;
67290
67291 while (count > 0) {
67292- int this_len = min_t(int, count, PAGE_SIZE);
67293+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
67294
67295 if (write && copy_from_user(page, buf, this_len)) {
67296 copied = -EFAULT;
67297@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67298 if (!mm)
67299 return 0;
67300
67301+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67302+ if (file->f_version != current->exec_id) {
67303+ gr_log_badprocpid("environ");
67304+ return 0;
67305+ }
67306+#endif
67307+
67308 page = (char *)__get_free_page(GFP_TEMPORARY);
67309 if (!page)
67310 return -ENOMEM;
67311@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
67312 goto free;
67313 while (count > 0) {
67314 size_t this_len, max_len;
67315- int retval;
67316+ ssize_t retval;
67317
67318 if (src >= (mm->env_end - mm->env_start))
67319 break;
67320@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
67321 int error = -EACCES;
67322
67323 /* Are we allowed to snoop on the tasks file descriptors? */
67324- if (!proc_fd_access_allowed(inode))
67325+ if (!proc_fd_access_allowed(inode, 0))
67326 goto out;
67327
67328 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67329@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
67330 struct path path;
67331
67332 /* Are we allowed to snoop on the tasks file descriptors? */
67333- if (!proc_fd_access_allowed(inode))
67334- goto out;
67335+ /* Logging is needed here for learning on Chromium to work properly,
67336+ but we don't want to flood the logs from 'ps', which does a readlink
67337+ on /proc/<pid>/fd/2 for every task in its listing, nor do we want
67338+ 'ps' to learn CAP_SYS_PTRACE, as that isn't necessary for its basic
67339+ functionality */
67340+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
67341+ if (!proc_fd_access_allowed(inode, 0))
67342+ goto out;
67343+ } else {
67344+ if (!proc_fd_access_allowed(inode, 1))
67345+ goto out;
67346+ }
67347
67348 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
67349 if (error)
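
The branch above keys on the literal dentry name "2": ps readlink()s /proc/<pid>/fd/2 for every task it lists, so that one name takes the PTRACE_MODE_NOAUDIT variant of proc_fd_access_allowed() and neither floods the learning log nor teaches the policy CAP_SYS_PTRACE. A sketch of that exact access (fd2_readlink.c is an illustrative name):

/* fd2_readlink.c - readlink /proc/<pid>/fd/2, the lookup 'ps' performs
 * per task; under the patch this name takes the non-audited path. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[64], target[PATH_MAX];
	ssize_t n;
	pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : getpid();

	snprintf(path, sizeof(path), "/proc/%d/fd/2", (int)pid);
	n = readlink(path, target, sizeof(target) - 1);
	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';
	printf("%s -> %s\n", path, target);
	return 0;
}
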
67350@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
67351 rcu_read_lock();
67352 cred = __task_cred(task);
67353 inode->i_uid = cred->euid;
67354+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67355+ inode->i_gid = grsec_proc_gid;
67356+#else
67357 inode->i_gid = cred->egid;
67358+#endif
67359 rcu_read_unlock();
67360 }
67361 security_task_to_inode(task, inode);
67362@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
67363 return -ENOENT;
67364 }
67365 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67366+#ifdef CONFIG_GRKERNSEC_PROC_USER
67367+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67368+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67369+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67370+#endif
67371 task_dumpable(task)) {
67372 cred = __task_cred(task);
67373 stat->uid = cred->euid;
67374+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67375+ stat->gid = grsec_proc_gid;
67376+#else
67377 stat->gid = cred->egid;
67378+#endif
67379 }
67380 }
67381 rcu_read_unlock();
67382@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
67383
67384 if (task) {
67385 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
67386+#ifdef CONFIG_GRKERNSEC_PROC_USER
67387+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
67388+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67389+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
67390+#endif
67391 task_dumpable(task)) {
67392 rcu_read_lock();
67393 cred = __task_cred(task);
67394 inode->i_uid = cred->euid;
67395+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67396+ inode->i_gid = grsec_proc_gid;
67397+#else
67398 inode->i_gid = cred->egid;
67399+#endif
67400 rcu_read_unlock();
67401 } else {
67402 inode->i_uid = GLOBAL_ROOT_UID;
67403@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
67404 if (!task)
67405 goto out_no_task;
67406
67407+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67408+ goto out;
67409+
67410 /*
67411 * Yes, it does not scale. And it should not. Don't add
67412 * new entries into /proc/<tgid>/ without very good reasons.
67413@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
67414 if (!task)
67415 return -ENOENT;
67416
67417+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67418+ goto out;
67419+
67420 if (!dir_emit_dots(file, ctx))
67421 goto out;
67422
67423@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
67424 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
67425 #endif
67426 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67427-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67428+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67429 ONE("syscall", S_IRUSR, proc_pid_syscall),
67430 #endif
67431 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
67432@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
67433 #ifdef CONFIG_SECURITY
67434 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67435 #endif
67436-#ifdef CONFIG_KALLSYMS
67437+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67438 ONE("wchan", S_IRUGO, proc_pid_wchan),
67439 #endif
67440-#ifdef CONFIG_STACKTRACE
67441+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67442 ONE("stack", S_IRUSR, proc_pid_stack),
67443 #endif
67444 #ifdef CONFIG_SCHEDSTATS
67445@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
67446 #ifdef CONFIG_HARDWALL
67447 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
67448 #endif
67449+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67450+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
67451+#endif
67452 #ifdef CONFIG_USER_NS
67453 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
67454 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
67455@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
67456 if (!inode)
67457 goto out;
67458
67459+#ifdef CONFIG_GRKERNSEC_PROC_USER
67460+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
67461+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67462+ inode->i_gid = grsec_proc_gid;
67463+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
67464+#else
67465 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
67466+#endif
67467 inode->i_op = &proc_tgid_base_inode_operations;
67468 inode->i_fop = &proc_tgid_base_operations;
67469 inode->i_flags|=S_IMMUTABLE;
67470@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
67471 if (!task)
67472 goto out;
67473
67474+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
67475+ goto out_put_task;
67476+
67477 result = proc_pid_instantiate(dir, dentry, task, NULL);
67478+out_put_task:
67479 put_task_struct(task);
67480 out:
67481 return ERR_PTR(result);
67482@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
67483 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
67484 #endif
67485 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67486-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67487+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67488 ONE("syscall", S_IRUSR, proc_pid_syscall),
67489 #endif
67490 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
67491@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
67492 #ifdef CONFIG_SECURITY
67493 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67494 #endif
67495-#ifdef CONFIG_KALLSYMS
67496+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67497 ONE("wchan", S_IRUGO, proc_pid_wchan),
67498 #endif
67499-#ifdef CONFIG_STACKTRACE
67500+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67501 ONE("stack", S_IRUSR, proc_pid_stack),
67502 #endif
67503 #ifdef CONFIG_SCHEDSTATS
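
Taken together, the base.c hunks narrow the /proc/<pid> directory from the stock 0555 to 0500 under PROC_USER, or to 0550 with group grsec_proc_gid under PROC_USERGROUP, and hide chrooted or flagged tasks outright. The effective result is easy to observe from userspace (pid_mode.c is an illustrative name):

/* pid_mode.c - stat a /proc/<pid> directory and print the mode, uid and
 * gid that proc_pid_instantiate()/pid_getattr() now decide. */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;
	const char *path = (argc > 1) ? argv[1] : "/proc/1";

	if (stat(path, &st) < 0) {
		perror("stat");
		return 1;
	}
	printf("%s: mode %04o uid %u gid %u\n", path,
	       st.st_mode & 07777, (unsigned)st.st_uid, (unsigned)st.st_gid);
	return 0;
}
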
67504diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
67505index cbd82df..c0407d2 100644
67506--- a/fs/proc/cmdline.c
67507+++ b/fs/proc/cmdline.c
67508@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
67509
67510 static int __init proc_cmdline_init(void)
67511 {
67512+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67513+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
67514+#else
67515 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
67516+#endif
67517 return 0;
67518 }
67519 fs_initcall(proc_cmdline_init);
67520diff --git a/fs/proc/devices.c b/fs/proc/devices.c
67521index 50493ed..248166b 100644
67522--- a/fs/proc/devices.c
67523+++ b/fs/proc/devices.c
67524@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
67525
67526 static int __init proc_devices_init(void)
67527 {
67528+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67529+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
67530+#else
67531 proc_create("devices", 0, NULL, &proc_devinfo_operations);
67532+#endif
67533 return 0;
67534 }
67535 fs_initcall(proc_devices_init);
67536diff --git a/fs/proc/fd.c b/fs/proc/fd.c
67537index 8e5ad83..1f07a8c 100644
67538--- a/fs/proc/fd.c
67539+++ b/fs/proc/fd.c
67540@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
67541 if (!task)
67542 return -ENOENT;
67543
67544- files = get_files_struct(task);
67545+ if (!gr_acl_handle_procpidmem(task))
67546+ files = get_files_struct(task);
67547 put_task_struct(task);
67548
67549 if (files) {
67550@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
67551 */
67552 int proc_fd_permission(struct inode *inode, int mask)
67553 {
67554+ struct task_struct *task;
67555 int rv = generic_permission(inode, mask);
67556- if (rv == 0)
67557- return 0;
67558+
67559 if (task_tgid(current) == proc_pid(inode))
67560 rv = 0;
67561+
67562+ task = get_proc_task(inode);
67563+ if (task == NULL)
67564+ return rv;
67565+
67566+ if (gr_acl_handle_procpidmem(task))
67567+ rv = -EACCES;
67568+
67569+ put_task_struct(task);
67570+
67571 return rv;
67572 }
67573
67574diff --git a/fs/proc/generic.c b/fs/proc/generic.c
67575index be65b20..2998ba8 100644
67576--- a/fs/proc/generic.c
67577+++ b/fs/proc/generic.c
67578@@ -22,6 +22,7 @@
67579 #include <linux/bitops.h>
67580 #include <linux/spinlock.h>
67581 #include <linux/completion.h>
67582+#include <linux/grsecurity.h>
67583 #include <asm/uaccess.h>
67584
67585 #include "internal.h"
67586@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
67587 return proc_lookup_de(PDE(dir), dir, dentry);
67588 }
67589
67590+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
67591+ unsigned int flags)
67592+{
67593+ if (gr_proc_is_restricted())
67594+ return ERR_PTR(-EACCES);
67595+
67596+ return proc_lookup_de(PDE(dir), dir, dentry);
67597+}
67598+
67599 /*
67600 * This returns non-zero if at EOF, so that the /proc
67601 * root directory can use this and check if it should
67602@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
67603 return proc_readdir_de(PDE(inode), file, ctx);
67604 }
67605
67606+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
67607+{
67608+ struct inode *inode = file_inode(file);
67609+
67610+ if (gr_proc_is_restricted())
67611+ return -EACCES;
67612+
67613+ return proc_readdir_de(PDE(inode), file, ctx);
67614+}
67615+
67616 /*
67617 * These are the generic /proc directory operations. They
67618 * use the in-memory "struct proc_dir_entry" tree to parse
67619@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
67620 .iterate = proc_readdir,
67621 };
67622
67623+static const struct file_operations proc_dir_restricted_operations = {
67624+ .llseek = generic_file_llseek,
67625+ .read = generic_read_dir,
67626+ .iterate = proc_readdir_restrict,
67627+};
67628+
67629 /*
67630 * proc directories can do almost nothing..
67631 */
67632@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
67633 .setattr = proc_notify_change,
67634 };
67635
67636+static const struct inode_operations proc_dir_restricted_inode_operations = {
67637+ .lookup = proc_lookup_restrict,
67638+ .getattr = proc_getattr,
67639+ .setattr = proc_notify_change,
67640+};
67641+
67642 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
67643 {
67644 int ret;
67645@@ -441,6 +473,31 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
67646 }
67647 EXPORT_SYMBOL_GPL(proc_mkdir_data);
67648
67649+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
67650+ struct proc_dir_entry *parent, void *data)
67651+{
67652+ struct proc_dir_entry *ent;
67653+
67654+ if (mode == 0)
67655+ mode = S_IRUGO | S_IXUGO;
67656+
67657+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
67658+ if (ent) {
67659+ ent->data = data;
67660+ ent->restricted = 1;
67661+ ent->proc_fops = &proc_dir_restricted_operations;
67662+ ent->proc_iops = &proc_dir_restricted_inode_operations;
67663+ parent->nlink++;
67664+ if (proc_register(parent, ent) < 0) {
67665+ kfree(ent);
67666+ parent->nlink--;
67667+ ent = NULL;
67668+ }
67669+ }
67670+ return ent;
67671+}
67672+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
67673+
67674 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
67675 struct proc_dir_entry *parent)
67676 {
67677@@ -455,6 +512,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
67678 }
67679 EXPORT_SYMBOL(proc_mkdir);
67680
67681+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
67682+ struct proc_dir_entry *parent)
67683+{
67684+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
67685+}
67686+EXPORT_SYMBOL(proc_mkdir_restrict);
67687+
67688 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
67689 struct proc_dir_entry *parent,
67690 const struct file_operations *proc_fops,
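
proc_mkdir_data_restrict() duplicates proc_mkdir_data() but sets the new restricted flag and swaps in the restricted file and inode operations, whose ->iterate and ->lookup return -EACCES when gr_proc_is_restricted() holds. For contrast, a minimal vanilla-kernel module sketch of the unrestricted wiring it parallels (assumes this kernel generation's file_operations-based procfs API; all names are illustrative):

/* procdemo.c - create /proc/procdemo/status with the stock helpers the
 * restricted variants above mirror. */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static struct proc_dir_entry *demo_dir;

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	demo_dir = proc_mkdir("procdemo", NULL);
	if (!demo_dir)
		return -ENOMEM;
	if (!proc_create("status", 0444, demo_dir, &demo_fops)) {
		remove_proc_entry("procdemo", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("status", demo_dir);
	remove_proc_entry("procdemo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The restricted variants differ only in substituting proc_dir_restricted_operations and proc_dir_restricted_inode_operations for the stock ones at registration time.
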
67691diff --git a/fs/proc/inode.c b/fs/proc/inode.c
67692index 7697b66..8d8e541 100644
67693--- a/fs/proc/inode.c
67694+++ b/fs/proc/inode.c
67695@@ -24,11 +24,17 @@
67696 #include <linux/mount.h>
67697 #include <linux/magic.h>
67698 #include <linux/namei.h>
67699+#include <linux/grsecurity.h>
67700
67701 #include <asm/uaccess.h>
67702
67703 #include "internal.h"
67704
67705+#ifdef CONFIG_PROC_SYSCTL
67706+extern const struct inode_operations proc_sys_inode_operations;
67707+extern const struct inode_operations proc_sys_dir_operations;
67708+#endif
67709+
67710 static void proc_evict_inode(struct inode *inode)
67711 {
67712 struct proc_dir_entry *de;
67713@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
67714 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
67715 sysctl_head_put(head);
67716 }
67717+
67718+#ifdef CONFIG_PROC_SYSCTL
67719+ if (inode->i_op == &proc_sys_inode_operations ||
67720+ inode->i_op == &proc_sys_dir_operations)
67721+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
67722+#endif
67723+
67724 }
67725
67726 static struct kmem_cache * proc_inode_cachep;
67727@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
67728 if (de->mode) {
67729 inode->i_mode = de->mode;
67730 inode->i_uid = de->uid;
67731+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67732+ inode->i_gid = grsec_proc_gid;
67733+#else
67734 inode->i_gid = de->gid;
67735+#endif
67736 }
67737 if (de->size)
67738 inode->i_size = de->size;
67739diff --git a/fs/proc/internal.h b/fs/proc/internal.h
67740index c835b94..c9e01a3 100644
67741--- a/fs/proc/internal.h
67742+++ b/fs/proc/internal.h
67743@@ -47,9 +47,10 @@ struct proc_dir_entry {
67744 struct completion *pde_unload_completion;
67745 struct list_head pde_openers; /* who did ->open, but not ->release */
67746 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
67747+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
67748 u8 namelen;
67749 char name[];
67750-};
67751+} __randomize_layout;
67752
67753 union proc_op {
67754 int (*proc_get_link)(struct dentry *, struct path *);
67755@@ -67,7 +68,7 @@ struct proc_inode {
67756 struct ctl_table *sysctl_entry;
67757 const struct proc_ns_operations *ns_ops;
67758 struct inode vfs_inode;
67759-};
67760+} __randomize_layout;
67761
67762 /*
67763 * General functions
67764@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
67765 struct pid *, struct task_struct *);
67766 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
67767 struct pid *, struct task_struct *);
67768+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67769+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
67770+ struct pid *, struct task_struct *);
67771+#endif
67772
67773 /*
67774 * base.c
67775@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
67776 * generic.c
67777 */
67778 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
67779+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
67780 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
67781 struct dentry *);
67782 extern int proc_readdir(struct file *, struct dir_context *);
67783+extern int proc_readdir_restrict(struct file *, struct dir_context *);
67784 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
67785
67786 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
67787diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
67788index a352d57..cb94a5c 100644
67789--- a/fs/proc/interrupts.c
67790+++ b/fs/proc/interrupts.c
67791@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
67792
67793 static int __init proc_interrupts_init(void)
67794 {
67795+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67796+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
67797+#else
67798 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
67799+#endif
67800 return 0;
67801 }
67802 fs_initcall(proc_interrupts_init);
67803diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
67804index 91a4e64..14bf8fa 100644
67805--- a/fs/proc/kcore.c
67806+++ b/fs/proc/kcore.c
67807@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67808 * the addresses in the elf_phdr on our list.
67809 */
67810 start = kc_offset_to_vaddr(*fpos - elf_buflen);
67811- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
67812+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
67813+ if (tsz > buflen)
67814 tsz = buflen;
67815-
67816+
67817 while (buflen) {
67818 struct kcore_list *m;
67819
67820@@ -515,19 +516,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67821 } else {
67822 if (kern_addr_valid(start)) {
67823 unsigned long n;
67824+ char *elf_buf;
67825+ mm_segment_t oldfs;
67826
67827- n = copy_to_user(buffer, (char *)start, tsz);
67828- /*
67829- * We cannot distinguish between fault on source
67830- * and fault on destination. When this happens
67831- * we clear too and hope it will trigger the
67832- * EFAULT again.
67833- */
67834- if (n) {
67835- if (clear_user(buffer + tsz - n,
67836- n))
67837- return -EFAULT;
67838- }
67839+ elf_buf = kzalloc(tsz, GFP_KERNEL);
67840+ if (!elf_buf)
67841+ return -ENOMEM;
67842+ oldfs = get_fs();
67843+ set_fs(KERNEL_DS);
67844+ n = __copy_from_user(elf_buf, (const void __user *)start, tsz);
67845+ set_fs(oldfs);
67846+ n = copy_to_user(buffer, elf_buf, tsz);
67847+ kfree(elf_buf);
67848+ if (n)
67849+ return -EFAULT;
67850 } else {
67851 if (clear_user(buffer, tsz))
67852 return -EFAULT;
67853@@ -547,6 +549,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67854
67855 static int open_kcore(struct inode *inode, struct file *filp)
67856 {
67857+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
67858+ return -EPERM;
67859+#endif
67860 if (!capable(CAP_SYS_RAWIO))
67861 return -EPERM;
67862 if (kcore_need_update)
67863@@ -580,7 +585,7 @@ static int __meminit kcore_callback(struct notifier_block *self,
67864 return NOTIFY_OK;
67865 }
67866
67867-static struct notifier_block kcore_callback_nb __meminitdata = {
67868+static struct notifier_block kcore_callback_nb __meminitconst = {
67869 .notifier_call = kcore_callback,
67870 .priority = 0,
67871 };
67872diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
67873index d3ebf2e..6ad42d1 100644
67874--- a/fs/proc/meminfo.c
67875+++ b/fs/proc/meminfo.c
67876@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
67877 vmi.used >> 10,
67878 vmi.largest_chunk >> 10
67879 #ifdef CONFIG_MEMORY_FAILURE
67880- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67881+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67882 #endif
67883 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
67884 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
67885diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
67886index d4a3574..b421ce9 100644
67887--- a/fs/proc/nommu.c
67888+++ b/fs/proc/nommu.c
67889@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
67890
67891 if (file) {
67892 seq_pad(m, ' ');
67893- seq_path(m, &file->f_path, "");
67894+ seq_path(m, &file->f_path, "\n\\");
67895 }
67896
67897 seq_putc(m, '\n');
67898diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
67899index 1bde894..22ac7eb 100644
67900--- a/fs/proc/proc_net.c
67901+++ b/fs/proc/proc_net.c
67902@@ -23,9 +23,27 @@
67903 #include <linux/nsproxy.h>
67904 #include <net/net_namespace.h>
67905 #include <linux/seq_file.h>
67906+#include <linux/grsecurity.h>
67907
67908 #include "internal.h"
67909
67910+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67911+static struct seq_operations *ipv6_seq_ops_addr;
67912+
67913+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
67914+{
67915+ ipv6_seq_ops_addr = addr;
67916+}
67917+
67918+void unregister_ipv6_seq_ops_addr(void)
67919+{
67920+ ipv6_seq_ops_addr = NULL;
67921+}
67922+
67923+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
67924+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
67925+#endif
67926+
67927 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
67928 {
67929 return pde->parent->data;
67930@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
67931 return maybe_get_net(PDE_NET(PDE(inode)));
67932 }
67933
67934+extern const struct seq_operations dev_seq_ops;
67935+
67936 int seq_open_net(struct inode *ino, struct file *f,
67937 const struct seq_operations *ops, int size)
67938 {
67939@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
67940
67941 BUG_ON(size < sizeof(*p));
67942
67943+ /* when restricted, only permit /proc/net/dev (and the registered ipv6 address seq ops) */
67944+ if (
67945+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67946+ ops != ipv6_seq_ops_addr &&
67947+#endif
67948+ ops != &dev_seq_ops && gr_proc_is_restricted())
67949+ return -EACCES;
67950+
67951 net = get_proc_net(ino);
67952 if (net == NULL)
67953 return -ENXIO;
67954@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
67955 int err;
67956 struct net *net;
67957
67958+ if (gr_proc_is_restricted())
67959+ return -EACCES;
67960+
67961 err = -ENXIO;
67962 net = get_proc_net(inode);
67963 if (net == NULL)
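
With the gates above, a task for which gr_proc_is_restricted() is true can still open /proc/net/dev (plus, when IPv6 is built, whatever seq_operations were handed to register_ipv6_seq_ops_addr()), while every other seq- or single-open /proc/net file fails with -EACCES. A sketch of the one read that stays permitted (netdev.c is an illustrative name):

/* netdev.c - dump /proc/net/dev, the only /proc/net seq file the
 * restricted path still allows. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/dev", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
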
67964diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
67965index f92d5dd..26398ac 100644
67966--- a/fs/proc/proc_sysctl.c
67967+++ b/fs/proc/proc_sysctl.c
67968@@ -11,13 +11,21 @@
67969 #include <linux/namei.h>
67970 #include <linux/mm.h>
67971 #include <linux/module.h>
67972+#include <linux/nsproxy.h>
67973+#ifdef CONFIG_GRKERNSEC
67974+#include <net/net_namespace.h>
67975+#endif
67976 #include "internal.h"
67977
67978+extern int gr_handle_chroot_sysctl(const int op);
67979+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67980+ const int op);
67981+
67982 static const struct dentry_operations proc_sys_dentry_operations;
67983 static const struct file_operations proc_sys_file_operations;
67984-static const struct inode_operations proc_sys_inode_operations;
67985+const struct inode_operations proc_sys_inode_operations;
67986 static const struct file_operations proc_sys_dir_file_operations;
67987-static const struct inode_operations proc_sys_dir_operations;
67988+const struct inode_operations proc_sys_dir_operations;
67989
67990 void proc_sys_poll_notify(struct ctl_table_poll *poll)
67991 {
67992@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
67993
67994 err = NULL;
67995 d_set_d_op(dentry, &proc_sys_dentry_operations);
67996+
67997+ gr_handle_proc_create(dentry, inode);
67998+
67999 d_add(dentry, inode);
68000
68001 out:
68002@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68003 struct inode *inode = file_inode(filp);
68004 struct ctl_table_header *head = grab_header(inode);
68005 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
68006+ int op = write ? MAY_WRITE : MAY_READ;
68007 ssize_t error;
68008 size_t res;
68009
68010@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68011 * and won't be until we finish.
68012 */
68013 error = -EPERM;
68014- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
68015+ if (sysctl_perm(head, table, op))
68016 goto out;
68017
68018 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
68019@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
68020 if (!table->proc_handler)
68021 goto out;
68022
68023+#ifdef CONFIG_GRKERNSEC
68024+ error = -EPERM;
68025+ if (gr_handle_chroot_sysctl(op))
68026+ goto out;
68027+ dget(filp->f_path.dentry);
68028+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
68029+ dput(filp->f_path.dentry);
68030+ goto out;
68031+ }
68032+ dput(filp->f_path.dentry);
68033+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
68034+ goto out;
68035+ if (write) {
68036+ if (current->nsproxy->net_ns != table->extra2) {
68037+ if (!capable(CAP_SYS_ADMIN))
68038+ goto out;
68039+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
68040+ goto out;
68041+ }
68042+#endif
68043+
68044 /* careful: calling conventions are nasty here */
68045 res = count;
68046 error = table->proc_handler(table, write, buf, &res, ppos);
68047@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
68048 return false;
68049 } else {
68050 d_set_d_op(child, &proc_sys_dentry_operations);
68051+
68052+ gr_handle_proc_create(child, inode);
68053+
68054 d_add(child, inode);
68055 }
68056 } else {
68057@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
68058 if ((*pos)++ < ctx->pos)
68059 return true;
68060
68061+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
68062+ return 0;
68063+
68064 if (unlikely(S_ISLNK(table->mode)))
68065 res = proc_sys_link_fill_cache(file, ctx, head, table);
68066 else
68067@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
68068 if (IS_ERR(head))
68069 return PTR_ERR(head);
68070
68071+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
68072+ return -ENOENT;
68073+
68074 generic_fillattr(inode, stat);
68075 if (table)
68076 stat->mode = (stat->mode & S_IFMT) | table->mode;
68077@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
68078 .llseek = generic_file_llseek,
68079 };
68080
68081-static const struct inode_operations proc_sys_inode_operations = {
68082+const struct inode_operations proc_sys_inode_operations = {
68083 .permission = proc_sys_permission,
68084 .setattr = proc_sys_setattr,
68085 .getattr = proc_sys_getattr,
68086 };
68087
68088-static const struct inode_operations proc_sys_dir_operations = {
68089+const struct inode_operations proc_sys_dir_operations = {
68090 .lookup = proc_sys_lookup,
68091 .permission = proc_sys_permission,
68092 .setattr = proc_sys_setattr,
68093@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
68094 static struct ctl_dir *new_dir(struct ctl_table_set *set,
68095 const char *name, int namelen)
68096 {
68097- struct ctl_table *table;
68098+ ctl_table_no_const *table;
68099 struct ctl_dir *new;
68100 struct ctl_node *node;
68101 char *new_name;
68102@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
68103 return NULL;
68104
68105 node = (struct ctl_node *)(new + 1);
68106- table = (struct ctl_table *)(node + 1);
68107+ table = (ctl_table_no_const *)(node + 1);
68108 new_name = (char *)(table + 2);
68109 memcpy(new_name, name, namelen);
68110 new_name[namelen] = '\0';
68111@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
68112 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
68113 struct ctl_table_root *link_root)
68114 {
68115- struct ctl_table *link_table, *entry, *link;
68116+ ctl_table_no_const *link_table, *link;
68117+ struct ctl_table *entry;
68118 struct ctl_table_header *links;
68119 struct ctl_node *node;
68120 char *link_name;
68121@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
68122 return NULL;
68123
68124 node = (struct ctl_node *)(links + 1);
68125- link_table = (struct ctl_table *)(node + nr_entries);
68126+ link_table = (ctl_table_no_const *)(node + nr_entries);
68127 link_name = (char *)&link_table[nr_entries + 1];
68128
68129 for (link = link_table, entry = table; entry->procname; link++, entry++) {
68130@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68131 struct ctl_table_header ***subheader, struct ctl_table_set *set,
68132 struct ctl_table *table)
68133 {
68134- struct ctl_table *ctl_table_arg = NULL;
68135- struct ctl_table *entry, *files;
68136+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
68137+ struct ctl_table *entry;
68138 int nr_files = 0;
68139 int nr_dirs = 0;
68140 int err = -ENOMEM;
68141@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68142 nr_files++;
68143 }
68144
68145- files = table;
68146 /* If there are mixed files and directories we need a new table */
68147 if (nr_dirs && nr_files) {
68148- struct ctl_table *new;
68149+ ctl_table_no_const *new;
68150 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
68151 GFP_KERNEL);
68152 if (!files)
68153@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
68154 /* Register everything except a directory full of subdirectories */
68155 if (nr_files || !nr_dirs) {
68156 struct ctl_table_header *header;
68157- header = __register_sysctl_table(set, path, files);
68158+ header = __register_sysctl_table(set, path, files ? files : table);
68159 if (!header) {
68160 kfree(ctl_table_arg);
68161 goto out;
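
proc_sys_call_handler() now layers the grsec chroot check, the per-path gr_handle_sysctl_mod()/gr_acl_handle_open() policy, and a namespace-capability test for writes on top of the stock sysctl_perm() call. A sketch of the /proc/sys read path those checks sit on (sysctl_read.c is an illustrative name):

/* sysctl_read.c - read a sysctl through /proc/sys, the path serviced by
 * proc_sys_call_handler(). */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/proc/sys/kernel/ostype", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("kernel.ostype = %s", buf);
	fclose(f);
	return 0;
}
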
68162diff --git a/fs/proc/root.c b/fs/proc/root.c
68163index e74ac9f..35e89f4 100644
68164--- a/fs/proc/root.c
68165+++ b/fs/proc/root.c
68166@@ -188,7 +188,15 @@ void __init proc_root_init(void)
68167 proc_mkdir("openprom", NULL);
68168 #endif
68169 proc_tty_init();
68170+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68171+#ifdef CONFIG_GRKERNSEC_PROC_USER
68172+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
68173+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68174+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
68175+#endif
68176+#else
68177 proc_mkdir("bus", NULL);
68178+#endif
68179 proc_sys_init();
68180 }
68181
68182diff --git a/fs/proc/stat.c b/fs/proc/stat.c
68183index 510413eb..34d9a8c 100644
68184--- a/fs/proc/stat.c
68185+++ b/fs/proc/stat.c
68186@@ -11,6 +11,7 @@
68187 #include <linux/irqnr.h>
68188 #include <linux/cputime.h>
68189 #include <linux/tick.h>
68190+#include <linux/grsecurity.h>
68191
68192 #ifndef arch_irq_stat_cpu
68193 #define arch_irq_stat_cpu(cpu) 0
68194@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
68195 u64 sum_softirq = 0;
68196 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
68197 struct timespec boottime;
68198+ int unrestricted = 1;
68199+
68200+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68201+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68202+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
68203+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68204+ && !in_group_p(grsec_proc_gid)
68205+#endif
68206+ )
68207+ unrestricted = 0;
68208+#endif
68209+#endif
68210
68211 user = nice = system = idle = iowait =
68212 irq = softirq = steal = 0;
68213@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
68214 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68215 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68216 idle += get_idle_time(i);
68217- iowait += get_iowait_time(i);
68218- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68219- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68220- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68221- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68222- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68223- sum += kstat_cpu_irqs_sum(i);
68224- sum += arch_irq_stat_cpu(i);
68225+ if (unrestricted) {
68226+ iowait += get_iowait_time(i);
68227+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68228+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68229+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68230+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68231+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68232+ sum += kstat_cpu_irqs_sum(i);
68233+ sum += arch_irq_stat_cpu(i);
68234+ for (j = 0; j < NR_SOFTIRQS; j++) {
68235+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68236
68237- for (j = 0; j < NR_SOFTIRQS; j++) {
68238- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
68239-
68240- per_softirq_sums[j] += softirq_stat;
68241- sum_softirq += softirq_stat;
68242+ per_softirq_sums[j] += softirq_stat;
68243+ sum_softirq += softirq_stat;
68244+ }
68245 }
68246 }
68247- sum += arch_irq_stat();
68248+ if (unrestricted)
68249+ sum += arch_irq_stat();
68250
68251 seq_puts(p, "cpu ");
68252 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68253@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
68254 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
68255 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
68256 idle = get_idle_time(i);
68257- iowait = get_iowait_time(i);
68258- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68259- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68260- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68261- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68262- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68263+ if (unrestricted) {
68264+ iowait = get_iowait_time(i);
68265+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
68266+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
68267+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
68268+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
68269+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
68270+ }
68271 seq_printf(p, "cpu%d", i);
68272 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
68273 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
68274@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
68275
68276 /* sum again ? it could be updated? */
68277 for_each_irq_nr(j)
68278- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
68279+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
68280
68281 seq_printf(p,
68282 "\nctxt %llu\n"
68283@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
68284 "processes %lu\n"
68285 "procs_running %lu\n"
68286 "procs_blocked %lu\n",
68287- nr_context_switches(),
68288+ unrestricted ? nr_context_switches() : 0ULL,
68289 (unsigned long)jif,
68290- total_forks,
68291- nr_running(),
68292- nr_iowait());
68293+ unrestricted ? total_forks : 0UL,
68294+ unrestricted ? nr_running() : 0UL,
68295+ unrestricted ? nr_iowait() : 0UL);
68296
68297 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
68298
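
show_stat() now zeroes iowait, irq, softirq, steal, guest, per-IRQ counts, context switches, forks and run-queue figures for readers failing the PROC_ADD uid/group test, so an unprivileged parse of /proc/stat sees zeros in those columns while user/nice/system/idle stay real. A sketch of parsing the aggregate cpu line (stat_cpu.c is an illustrative name):

/* stat_cpu.c - parse the first "cpu" line of /proc/stat as rendered by
 * show_stat(); gated fields read as zero for restricted callers. */
#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, system, idle, iowait, irq, softirq, steal;
	FILE *f = fopen("/proc/stat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu",
		   &user, &nice, &system, &idle, &iowait, &irq,
		   &softirq, &steal) != 8) {
		fprintf(stderr, "unexpected /proc/stat format\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("user=%llu idle=%llu iowait=%llu irq=%llu softirq=%llu\n",
	       user, idle, iowait, irq, softirq);
	return 0;
}
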
68299diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
68300index 6dee68d..1b4add0 100644
68301--- a/fs/proc/task_mmu.c
68302+++ b/fs/proc/task_mmu.c
68303@@ -13,12 +13,19 @@
68304 #include <linux/swap.h>
68305 #include <linux/swapops.h>
68306 #include <linux/mmu_notifier.h>
68307+#include <linux/grsecurity.h>
68308
68309 #include <asm/elf.h>
68310 #include <asm/uaccess.h>
68311 #include <asm/tlbflush.h>
68312 #include "internal.h"
68313
68314+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68315+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
68316+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
68317+ _mm->pax_flags & MF_PAX_SEGMEXEC))
68318+#endif
68319+
68320 void task_mem(struct seq_file *m, struct mm_struct *mm)
68321 {
68322 unsigned long data, text, lib, swap, ptes, pmds;
68323@@ -57,8 +64,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68324 "VmLib:\t%8lu kB\n"
68325 "VmPTE:\t%8lu kB\n"
68326 "VmPMD:\t%8lu kB\n"
68327- "VmSwap:\t%8lu kB\n",
68328- hiwater_vm << (PAGE_SHIFT-10),
68329+ "VmSwap:\t%8lu kB\n"
68330+
68331+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68332+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
68333+#endif
68334+
68335+ ,hiwater_vm << (PAGE_SHIFT-10),
68336 total_vm << (PAGE_SHIFT-10),
68337 mm->locked_vm << (PAGE_SHIFT-10),
68338 mm->pinned_vm << (PAGE_SHIFT-10),
68339@@ -68,7 +80,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68340 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
68341 ptes >> 10,
68342 pmds >> 10,
68343- swap << (PAGE_SHIFT-10));
68344+ swap << (PAGE_SHIFT-10)
68345+
68346+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68347+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68348+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
68349+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
68350+#else
68351+ , mm->context.user_cs_base
68352+ , mm->context.user_cs_limit
68353+#endif
68354+#endif
68355+
68356+ );
68357 }
68358
68359 unsigned long task_vsize(struct mm_struct *mm)
68360@@ -285,13 +309,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68361 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
68362 }
68363
68364- /* We don't show the stack guard page in /proc/maps */
68365+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68366+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
68367+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
68368+#else
68369 start = vma->vm_start;
68370- if (stack_guard_page_start(vma, start))
68371- start += PAGE_SIZE;
68372 end = vma->vm_end;
68373- if (stack_guard_page_end(vma, end))
68374- end -= PAGE_SIZE;
68375+#endif
68376
68377 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
68378 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
68379@@ -301,7 +325,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68380 flags & VM_WRITE ? 'w' : '-',
68381 flags & VM_EXEC ? 'x' : '-',
68382 flags & VM_MAYSHARE ? 's' : 'p',
68383+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68384+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
68385+#else
68386 pgoff,
68387+#endif
68388 MAJOR(dev), MINOR(dev), ino);
68389
68390 /*
68391@@ -310,7 +338,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68392 */
68393 if (file) {
68394 seq_pad(m, ' ');
68395- seq_path(m, &file->f_path, "\n");
68396+ seq_path(m, &file->f_path, "\n\\");
68397 goto done;
68398 }
68399
68400@@ -341,8 +369,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
68401 * Thread stack in /proc/PID/task/TID/maps or
68402 * the main process stack.
68403 */
68404- if (!is_pid || (vma->vm_start <= mm->start_stack &&
68405- vma->vm_end >= mm->start_stack)) {
68406+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
68407+ (vma->vm_start <= mm->start_stack &&
68408+ vma->vm_end >= mm->start_stack)) {
68409 name = "[stack]";
68410 } else {
68411 /* Thread stack in /proc/PID/maps */
68412@@ -362,6 +391,12 @@ done:
68413
68414 static int show_map(struct seq_file *m, void *v, int is_pid)
68415 {
68416+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68417+ if (current->exec_id != m->exec_id) {
68418+ gr_log_badprocpid("maps");
68419+ return 0;
68420+ }
68421+#endif
68422 show_map_vma(m, v, is_pid);
68423 m_cache_vma(m, v);
68424 return 0;
68425@@ -620,9 +655,18 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68426 .private = &mss,
68427 };
68428
68429+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68430+ if (current->exec_id != m->exec_id) {
68431+ gr_log_badprocpid("smaps");
68432+ return 0;
68433+ }
68434+#endif
68435 memset(&mss, 0, sizeof mss);
68436- /* mmap_sem is held in m_start */
68437- walk_page_vma(vma, &smaps_walk);
68438+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68439+ if (!PAX_RAND_FLAGS(vma->vm_mm))
68440+#endif
68441+ /* mmap_sem is held in m_start */
68442+ walk_page_vma(vma, &smaps_walk);
68443
68444 show_map_vma(m, vma, is_pid);
68445
68446@@ -641,7 +685,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
68447 "KernelPageSize: %8lu kB\n"
68448 "MMUPageSize: %8lu kB\n"
68449 "Locked: %8lu kB\n",
68450+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68451+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
68452+#else
68453 (vma->vm_end - vma->vm_start) >> 10,
68454+#endif
68455 mss.resident >> 10,
68456 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
68457 mss.shared_clean >> 10,
68458@@ -1491,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68459 char buffer[64];
68460 int nid;
68461
68462+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68463+ if (current->exec_id != m->exec_id) {
68464+ gr_log_badprocpid("numa_maps");
68465+ return 0;
68466+ }
68467+#endif
68468+
68469 if (!mm)
68470 return 0;
68471
68472@@ -1505,11 +1560,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68473 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
68474 }
68475
68476+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68477+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
68478+#else
68479 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
68480+#endif
68481
68482 if (file) {
68483 seq_puts(m, " file=");
68484- seq_path(m, &file->f_path, "\n\t= ");
68485+ seq_path(m, &file->f_path, "\n\t\\= ");
68486 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68487 seq_puts(m, " heap");
68488 } else {
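
show_map_vma() and show_numa_map() render /proc/<pid>/maps, smaps and numa_maps; when PAX_RAND_FLAGS holds for a foreign mm, the start, end and offset columns come back as zero so the mmap layout can't be probed. A sketch that dumps the first few lines of the caller's own (unredacted) map (maps_dump.c is an illustrative name):

/* maps_dump.c - print the head of /proc/self/maps, the file
 * show_map_vma() renders line by line. */
#include <stdio.h>

int main(void)
{
	char line[512];
	int n = 0;
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (n++ < 5 && fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
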
68489diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
68490index 599ec2e..f1413ae 100644
68491--- a/fs/proc/task_nommu.c
68492+++ b/fs/proc/task_nommu.c
68493@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68494 else
68495 bytes += kobjsize(mm);
68496
68497- if (current->fs && current->fs->users > 1)
68498+ if (current->fs && atomic_read(&current->fs->users) > 1)
68499 sbytes += kobjsize(current->fs);
68500 else
68501 bytes += kobjsize(current->fs);
68502@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
68503
68504 if (file) {
68505 seq_pad(m, ' ');
68506- seq_path(m, &file->f_path, "");
68507+ seq_path(m, &file->f_path, "\n\\");
68508 } else if (mm) {
68509 pid_t tid = pid_of_stack(priv, vma, is_pid);
68510
68511diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
68512index 4e61388..1a2523d 100644
68513--- a/fs/proc/vmcore.c
68514+++ b/fs/proc/vmcore.c
68515@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
68516 nr_bytes = count;
68517
68518 /* If pfn is not ram, return zeros for sparse dump files */
68519- if (pfn_is_ram(pfn) == 0)
68520- memset(buf, 0, nr_bytes);
68521- else {
68522+ if (pfn_is_ram(pfn) == 0) {
68523+ if (userbuf) {
68524+ if (clear_user((char __force_user *)buf, nr_bytes))
68525+ return -EFAULT;
68526+ } else
68527+ memset(buf, 0, nr_bytes);
68528+ } else {
68529 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
68530 offset, userbuf);
68531 if (tmp < 0)
68532@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
68533 static int copy_to(void *target, void *src, size_t size, int userbuf)
68534 {
68535 if (userbuf) {
68536- if (copy_to_user((char __user *) target, src, size))
68537+ if (copy_to_user((char __force_user *) target, src, size))
68538 return -EFAULT;
68539 } else {
68540 memcpy(target, src, size);
68541@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68542 if (*fpos < m->offset + m->size) {
68543 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
68544 start = m->paddr + *fpos - m->offset;
68545- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
68546+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
68547 if (tmp < 0)
68548 return tmp;
68549 buflen -= tsz;
68550@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68551 static ssize_t read_vmcore(struct file *file, char __user *buffer,
68552 size_t buflen, loff_t *fpos)
68553 {
68554- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
68555+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
68556 }
68557
68558 /*
68559diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
68560index d3fb2b6..43a8140 100644
68561--- a/fs/qnx6/qnx6.h
68562+++ b/fs/qnx6/qnx6.h
68563@@ -74,7 +74,7 @@ enum {
68564 BYTESEX_BE,
68565 };
68566
68567-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68568+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68569 {
68570 if (sbi->s_bytesex == BYTESEX_LE)
68571 return le64_to_cpu((__force __le64)n);
68572@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
68573 return (__force __fs64)cpu_to_be64(n);
68574 }
68575
68576-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68577+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68578 {
68579 if (sbi->s_bytesex == BYTESEX_LE)
68580 return le32_to_cpu((__force __le32)n);
68581diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
68582index bb2869f..d34ada8 100644
68583--- a/fs/quota/netlink.c
68584+++ b/fs/quota/netlink.c
68585@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
68586 void quota_send_warning(struct kqid qid, dev_t dev,
68587 const char warntype)
68588 {
68589- static atomic_t seq;
68590+ static atomic_unchecked_t seq;
68591 struct sk_buff *skb;
68592 void *msg_head;
68593 int ret;
68594@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
68595 "VFS: Not enough memory to send quota warning.\n");
68596 return;
68597 }
68598- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
68599+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
68600 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
68601 if (!msg_head) {
68602 printk(KERN_ERR
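
quota_send_warning() converts its netlink sequence counter to atomic_unchecked_t, the grsec/PaX type for counters that may legitimately wrap without tripping the refcount-overflow instrumentation; atomic_add_return_unchecked(1, &seq) still yields the post-increment value. A userspace sketch of the same idiom in C11 atomics (seqctr.c is an illustrative name):

/* seqctr.c - a monotonically increasing message sequence counter;
 * atomic_fetch_add(&seq, 1) + 1 matches atomic_add_return(1, &seq). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;

static unsigned int next_seq(void)
{
	/* add one, then report the new value, as the kernel helper does */
	return atomic_fetch_add(&seq, 1) + 1;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("msg seq %u\n", next_seq());
	return 0;
}

Compile with "gcc -std=c11 seqctr.c".
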
68603diff --git a/fs/read_write.c b/fs/read_write.c
68604index 8e1b687..bad2eec 100644
68605--- a/fs/read_write.c
68606+++ b/fs/read_write.c
68607@@ -553,7 +553,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
68608
68609 old_fs = get_fs();
68610 set_fs(get_ds());
68611- p = (__force const char __user *)buf;
68612+ p = (const char __force_user *)buf;
68613 if (count > MAX_RW_COUNT)
68614 count = MAX_RW_COUNT;
68615 if (file->f_op->write)
68616diff --git a/fs/readdir.c b/fs/readdir.c
68617index ced6791..936687b 100644
68618--- a/fs/readdir.c
68619+++ b/fs/readdir.c
68620@@ -18,6 +18,7 @@
68621 #include <linux/security.h>
68622 #include <linux/syscalls.h>
68623 #include <linux/unistd.h>
68624+#include <linux/namei.h>
68625
68626 #include <asm/uaccess.h>
68627
68628@@ -71,6 +72,7 @@ struct old_linux_dirent {
68629 struct readdir_callback {
68630 struct dir_context ctx;
68631 struct old_linux_dirent __user * dirent;
68632+ struct file * file;
68633 int result;
68634 };
68635
68636@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
68637 buf->result = -EOVERFLOW;
68638 return -EOVERFLOW;
68639 }
68640+
68641+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68642+ return 0;
68643+
68644 buf->result++;
68645 dirent = buf->dirent;
68646 if (!access_ok(VERIFY_WRITE, dirent,
68647@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68648 if (!f.file)
68649 return -EBADF;
68650
68651+ buf.file = f.file;
68652 error = iterate_dir(f.file, &buf.ctx);
68653 if (buf.result)
68654 error = buf.result;
68655@@ -145,6 +152,7 @@ struct getdents_callback {
68656 struct dir_context ctx;
68657 struct linux_dirent __user * current_dir;
68658 struct linux_dirent __user * previous;
68659+ struct file * file;
68660 int count;
68661 int error;
68662 };
68663@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
68664 buf->error = -EOVERFLOW;
68665 return -EOVERFLOW;
68666 }
68667+
68668+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68669+ return 0;
68670+
68671 dirent = buf->previous;
68672 if (dirent) {
68673 if (__put_user(offset, &dirent->d_off))
68674@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
68675 if (!f.file)
68676 return -EBADF;
68677
68678+ buf.file = f.file;
68679 error = iterate_dir(f.file, &buf.ctx);
68680 if (error >= 0)
68681 error = buf.error;
68682@@ -230,6 +243,7 @@ struct getdents_callback64 {
68683 struct dir_context ctx;
68684 struct linux_dirent64 __user * current_dir;
68685 struct linux_dirent64 __user * previous;
68686+ struct file *file;
68687 int count;
68688 int error;
68689 };
68690@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
68691 buf->error = -EINVAL; /* only used if we fail.. */
68692 if (reclen > buf->count)
68693 return -EINVAL;
68694+
68695+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68696+ return 0;
68697+
68698 dirent = buf->previous;
68699 if (dirent) {
68700 if (__put_user(offset, &dirent->d_off))
68701@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68702 if (!f.file)
68703 return -EBADF;
68704
68705+ buf.file = f.file;
68706 error = iterate_dir(f.file, &buf.ctx);
68707 if (error >= 0)
68708 error = buf.error;
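
All three getdents flavors now carry the struct file into their fill callbacks so gr_acl_handle_filldir() can veto individual names, which are then silently skipped rather than returned. A sketch driving the same filldir64 path through the raw syscall (listdir.c is an illustrative name; the local struct mirrors the kernel's linux_dirent64 ABI on x86-64):

/* listdir.c - enumerate /proc with getdents64, whose filldir64 callback
 * the patch hooks entry by entry. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct linux_dirent64 {
	unsigned long long d_ino;
	long long          d_off;
	unsigned short     d_reclen;
	unsigned char      d_type;
	char               d_name[];
};

int main(void)
{
	char buf[4096];
	long n;
	int fd = open("/proc", O_RDONLY | O_DIRECTORY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
		for (long off = 0; off < n; ) {
			struct linux_dirent64 *d =
				(struct linux_dirent64 *)(buf + off);
			puts(d->d_name);
			off += d->d_reclen;
		}
	}
	close(fd);
	return 0;
}

Under PROC_USER or PROC_USERGROUP, pid directories belonging to other users would simply never appear in this loop's output.
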
68709diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
68710index 9c02d96..6562c10 100644
68711--- a/fs/reiserfs/do_balan.c
68712+++ b/fs/reiserfs/do_balan.c
68713@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
68714 return;
68715 }
68716
68717- atomic_inc(&fs_generation(tb->tb_sb));
68718+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
68719 do_balance_starts(tb);
68720
68721 /*
68722diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
68723index aca73dd..e3c558d 100644
68724--- a/fs/reiserfs/item_ops.c
68725+++ b/fs/reiserfs/item_ops.c
68726@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
68727 }
68728
68729 static struct item_operations errcatch_ops = {
68730- errcatch_bytes_number,
68731- errcatch_decrement_key,
68732- errcatch_is_left_mergeable,
68733- errcatch_print_item,
68734- errcatch_check_item,
68735+ .bytes_number = errcatch_bytes_number,
68736+ .decrement_key = errcatch_decrement_key,
68737+ .is_left_mergeable = errcatch_is_left_mergeable,
68738+ .print_item = errcatch_print_item,
68739+ .check_item = errcatch_check_item,
68740
68741- errcatch_create_vi,
68742- errcatch_check_left,
68743- errcatch_check_right,
68744- errcatch_part_size,
68745- errcatch_unit_num,
68746- errcatch_print_vi
68747+ .create_vi = errcatch_create_vi,
68748+ .check_left = errcatch_check_left,
68749+ .check_right = errcatch_check_right,
68750+ .part_size = errcatch_part_size,
68751+ .unit_num = errcatch_unit_num,
68752+ .print_vi = errcatch_print_vi
68753 };
68754
68755 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
68756diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
68757index 621b9f3..af527fd 100644
68758--- a/fs/reiserfs/procfs.c
68759+++ b/fs/reiserfs/procfs.c
68760@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
68761 "SMALL_TAILS " : "NO_TAILS ",
68762 replay_only(sb) ? "REPLAY_ONLY " : "",
68763 convert_reiserfs(sb) ? "CONV " : "",
68764- atomic_read(&r->s_generation_counter),
68765+ atomic_read_unchecked(&r->s_generation_counter),
68766 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
68767 SF(s_do_balance), SF(s_unneeded_left_neighbor),
68768 SF(s_good_search_by_key_reada), SF(s_bmaps),
68769diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
68770index bb79cdd..fcf49ef 100644
68771--- a/fs/reiserfs/reiserfs.h
68772+++ b/fs/reiserfs/reiserfs.h
68773@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
68774 /* Comment? -Hans */
68775 wait_queue_head_t s_wait;
68776 /* increased by one every time the tree gets re-balanced */
68777- atomic_t s_generation_counter;
68778+ atomic_unchecked_t s_generation_counter;
68779
68780 /* File system properties. Currently holds on-disk FS format */
68781 unsigned long s_properties;
68782@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68783 #define REISERFS_USER_MEM 1 /* user memory mode */
68784
68785 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68786-#define get_generation(s) atomic_read (&fs_generation(s))
68787+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68788 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68789 #define __fs_changed(gen,s) (gen != get_generation (s))
68790 #define fs_changed(gen,s) \
68791diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
68792index 71fbbe3..eff29ba 100644
68793--- a/fs/reiserfs/super.c
68794+++ b/fs/reiserfs/super.c
68795@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
68796 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
68797 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
68798 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
68799+#ifdef CONFIG_REISERFS_FS_XATTR
68800+ /* turn on user xattrs by default */
68801+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
68802+#endif
68803 /* no preallocation minimum, be smart in reiserfs_file_write instead */
68804 sbi->s_alloc_options.preallocmin = 0;
68805 /* Preallocate by 16 blocks (17-1) at once */
68806diff --git a/fs/select.c b/fs/select.c
68807index f684c75..4117611 100644
68808--- a/fs/select.c
68809+++ b/fs/select.c
68810@@ -20,6 +20,7 @@
68811 #include <linux/export.h>
68812 #include <linux/slab.h>
68813 #include <linux/poll.h>
68814+#include <linux/security.h>
68815 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
68816 #include <linux/file.h>
68817 #include <linux/fdtable.h>
68818@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
68819 struct poll_list *walk = head;
68820 unsigned long todo = nfds;
68821
68822+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
68823 if (nfds > rlimit(RLIMIT_NOFILE))
68824 return -EINVAL;
68825
68826diff --git a/fs/seq_file.c b/fs/seq_file.c
68827index 555f821..34684d7 100644
68828--- a/fs/seq_file.c
68829+++ b/fs/seq_file.c
68830@@ -12,6 +12,8 @@
68831 #include <linux/slab.h>
68832 #include <linux/cred.h>
68833 #include <linux/mm.h>
68834+#include <linux/sched.h>
68835+#include <linux/grsecurity.h>
68836
68837 #include <asm/uaccess.h>
68838 #include <asm/page.h>
68839@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
68840
68841 static void *seq_buf_alloc(unsigned long size)
68842 {
68843- void *buf;
68844-
68845- /*
68846- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
68847- * it's better to fall back to vmalloc() than to kill things.
68848- */
68849- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
68850- if (!buf && size > PAGE_SIZE)
68851- buf = vmalloc(size);
68852- return buf;
68853+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
68854 }
68855
68856 /**
68857@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
68858 #ifdef CONFIG_USER_NS
68859 p->user_ns = file->f_cred->user_ns;
68860 #endif
68861+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68862+ p->exec_id = current->exec_id;
68863+#endif
68864
68865 /*
68866 * Wrappers around seq_open(e.g. swaps_open) need to be
68867@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
68868 }
68869 EXPORT_SYMBOL(seq_open);
68870
68871+
68872+int seq_open_restrict(struct file *file, const struct seq_operations *op)
68873+{
68874+ if (gr_proc_is_restricted())
68875+ return -EACCES;
68876+
68877+ return seq_open(file, op);
68878+}
68879+EXPORT_SYMBOL(seq_open_restrict);
68880+
68881 static int traverse(struct seq_file *m, loff_t offset)
68882 {
68883 loff_t pos = 0, index;
68884@@ -158,7 +164,7 @@ Eoverflow:
68885 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
68886 {
68887 struct seq_file *m = file->private_data;
68888- size_t copied = 0;
68889+ ssize_t copied = 0;
68890 loff_t pos;
68891 size_t n;
68892 void *p;
68893@@ -557,7 +563,7 @@ static void single_stop(struct seq_file *p, void *v)
68894 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
68895 void *data)
68896 {
68897- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
68898+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
68899 int res = -ENOMEM;
68900
68901 if (op) {
68902@@ -593,6 +599,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
68903 }
68904 EXPORT_SYMBOL(single_open_size);
68905
68906+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
68907+ void *data)
68908+{
68909+ if (gr_proc_is_restricted())
68910+ return -EACCES;
68911+
68912+ return single_open(file, show, data);
68913+}
68914+EXPORT_SYMBOL(single_open_restrict);
68915+
68916+
68917 int single_release(struct inode *inode, struct file *file)
68918 {
68919 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
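seq_open_restrict() and single_open_restrict() give /proc code a one-line opt-in to grsecurity's proc visibility policy: the wrapper fails the open with -EACCES when gr_proc_is_restricted() objects, and otherwise behaves exactly like seq_open()/single_open(). A hypothetical caller might look like the following sketch (my_proc_show and the fops name are invented for the example):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int my_proc_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "sensitive statistics\n");
        return 0;
    }

    static int my_proc_open(struct inode *inode, struct file *file)
    {
        /* denied with -EACCES for restricted viewers, otherwise a
         * normal single-record seq_file open */
        return single_open_restrict(file, my_proc_show, NULL);
    }

    static const struct file_operations my_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = my_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };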
68920diff --git a/fs/splice.c b/fs/splice.c
68921index 7968da9..4ce985b 100644
68922--- a/fs/splice.c
68923+++ b/fs/splice.c
68924@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68925 pipe_lock(pipe);
68926
68927 for (;;) {
68928- if (!pipe->readers) {
68929+ if (!atomic_read(&pipe->readers)) {
68930 send_sig(SIGPIPE, current, 0);
68931 if (!ret)
68932 ret = -EPIPE;
68933@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68934 page_nr++;
68935 ret += buf->len;
68936
68937- if (pipe->files)
68938+ if (atomic_read(&pipe->files))
68939 do_wakeup = 1;
68940
68941 if (!--spd->nr_pages)
68942@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68943 do_wakeup = 0;
68944 }
68945
68946- pipe->waiting_writers++;
68947+ atomic_inc(&pipe->waiting_writers);
68948 pipe_wait(pipe);
68949- pipe->waiting_writers--;
68950+ atomic_dec(&pipe->waiting_writers);
68951 }
68952
68953 pipe_unlock(pipe);
68954@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
68955 old_fs = get_fs();
68956 set_fs(get_ds());
68957 /* The cast to a user pointer is valid due to the set_fs() */
68958- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
68959+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
68960 set_fs(old_fs);
68961
68962 return res;
68963@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
68964 old_fs = get_fs();
68965 set_fs(get_ds());
68966 /* The cast to a user pointer is valid due to the set_fs() */
68967- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
68968+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
68969 set_fs(old_fs);
68970
68971 return res;
68972@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
68973 goto err;
68974
68975 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
68976- vec[i].iov_base = (void __user *) page_address(page);
68977+ vec[i].iov_base = (void __force_user *) page_address(page);
68978 vec[i].iov_len = this_len;
68979 spd.pages[i] = page;
68980 spd.nr_pages++;
68981@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68982 ops->release(pipe, buf);
68983 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68984 pipe->nrbufs--;
68985- if (pipe->files)
68986+ if (atomic_read(&pipe->files))
68987 sd->need_wakeup = true;
68988 }
68989
68990@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68991 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
68992 {
68993 while (!pipe->nrbufs) {
68994- if (!pipe->writers)
68995+ if (!atomic_read(&pipe->writers))
68996 return 0;
68997
68998- if (!pipe->waiting_writers && sd->num_spliced)
68999+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
69000 return 0;
69001
69002 if (sd->flags & SPLICE_F_NONBLOCK)
69003@@ -1025,7 +1025,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
69004 ops->release(pipe, buf);
69005 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
69006 pipe->nrbufs--;
69007- if (pipe->files)
69008+ if (atomic_read(&pipe->files))
69009 sd.need_wakeup = true;
69010 } else {
69011 buf->offset += ret;
69012@@ -1159,7 +1159,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69013 long ret, bytes;
69014 umode_t i_mode;
69015 size_t len;
69016- int i, flags;
69017+ int i, flags, more;
69018
69019 /*
69020 * We require the input being a regular file, as we don't want to
69021@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69022 * out of the pipe right after the splice_to_pipe(). So set
69023 * PIPE_READERS appropriately.
69024 */
69025- pipe->readers = 1;
69026+ atomic_set(&pipe->readers, 1);
69027
69028 current->splice_pipe = pipe;
69029 }
69030@@ -1202,6 +1202,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69031 * Don't block on output, we have to drain the direct pipe.
69032 */
69033 sd->flags &= ~SPLICE_F_NONBLOCK;
69034+ more = sd->flags & SPLICE_F_MORE;
69035
69036 while (len) {
69037 size_t read_len;
69038@@ -1215,6 +1216,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
69039 sd->total_len = read_len;
69040
69041 /*
69042+	 * If more data is pending, set SPLICE_F_MORE.
69043+	 * If this is the last chunk and SPLICE_F_MORE was not set
69044+	 * initially, clear it.
69045+ */
69046+ if (read_len < len)
69047+ sd->flags |= SPLICE_F_MORE;
69048+ else if (!more)
69049+ sd->flags &= ~SPLICE_F_MORE;
69050+ /*
69051 * NOTE: nonblocking mode only applies to the input. We
69052 * must not do the output in nonblocking mode as then we
69053 * could get stuck data in the internal pipe:
69054@@ -1482,6 +1492,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
69055
69056 partial[buffers].offset = off;
69057 partial[buffers].len = plen;
69058+ partial[buffers].private = 0;
69059
69060 off = 0;
69061 len -= plen;
69062@@ -1718,9 +1729,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69063 ret = -ERESTARTSYS;
69064 break;
69065 }
69066- if (!pipe->writers)
69067+ if (!atomic_read(&pipe->writers))
69068 break;
69069- if (!pipe->waiting_writers) {
69070+ if (!atomic_read(&pipe->waiting_writers)) {
69071 if (flags & SPLICE_F_NONBLOCK) {
69072 ret = -EAGAIN;
69073 break;
69074@@ -1752,7 +1763,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69075 pipe_lock(pipe);
69076
69077 while (pipe->nrbufs >= pipe->buffers) {
69078- if (!pipe->readers) {
69079+ if (!atomic_read(&pipe->readers)) {
69080 send_sig(SIGPIPE, current, 0);
69081 ret = -EPIPE;
69082 break;
69083@@ -1765,9 +1776,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
69084 ret = -ERESTARTSYS;
69085 break;
69086 }
69087- pipe->waiting_writers++;
69088+ atomic_inc(&pipe->waiting_writers);
69089 pipe_wait(pipe);
69090- pipe->waiting_writers--;
69091+ atomic_dec(&pipe->waiting_writers);
69092 }
69093
69094 pipe_unlock(pipe);
69095@@ -1803,14 +1814,14 @@ retry:
69096 pipe_double_lock(ipipe, opipe);
69097
69098 do {
69099- if (!opipe->readers) {
69100+ if (!atomic_read(&opipe->readers)) {
69101 send_sig(SIGPIPE, current, 0);
69102 if (!ret)
69103 ret = -EPIPE;
69104 break;
69105 }
69106
69107- if (!ipipe->nrbufs && !ipipe->writers)
69108+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
69109 break;
69110
69111 /*
69112@@ -1907,7 +1918,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
69113 pipe_double_lock(ipipe, opipe);
69114
69115 do {
69116- if (!opipe->readers) {
69117+ if (!atomic_read(&opipe->readers)) {
69118 send_sig(SIGPIPE, current, 0);
69119 if (!ret)
69120 ret = -EPIPE;
69121@@ -1952,7 +1963,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
69122 * return EAGAIN if we have the potential of some data in the
69123 * future, otherwise just return 0
69124 */
69125- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
69126+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
69127 ret = -EAGAIN;
69128
69129 pipe_unlock(ipipe);
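Besides the atomic conversions, the splice.c hunk carries a behavioral fix: splice_direct_to_actor() now keeps SPLICE_F_MORE set while input remains and clears it on the final chunk only if the caller did not request it, so sendfile()-style transfers stop flushing partial network packets between internal pipe refills. The flag is the same one user space passes to splice(2); a minimal, hypothetical transfer loop for comparison:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    /* copy up to len bytes from in_fd to a socket, hinting that
     * more data follows every chunk except the last */
    static ssize_t copy_more(int in_fd, int out_fd, size_t len)
    {
        ssize_t n, total = 0;

        while (len > 0) {
            unsigned int flags = len > 65536 ? SPLICE_F_MORE : 0;

            n = splice(in_fd, NULL, out_fd, NULL,
                       len > 65536 ? 65536 : len, flags);
            if (n <= 0)
                return n < 0 ? -1 : total;
            total += n;
            len -= n;
        }
        return total;
    }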
69130diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
69131index 92fcde7..1687329 100644
69132--- a/fs/squashfs/xattr.c
69133+++ b/fs/squashfs/xattr.c
69134@@ -46,8 +46,8 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
69135 + msblk->xattr_table;
69136 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
69137 int count = squashfs_i(inode)->xattr_count;
69138- size_t rest = buffer_size;
69139- int err;
69140+ size_t used = 0;
69141+ ssize_t err;
69142
69143 /* check that the file system has xattrs */
69144 if (msblk->xattr_id_table == NULL)
69145@@ -68,11 +68,11 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
69146 name_size = le16_to_cpu(entry.size);
69147 handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
69148 if (handler)
69149- prefix_size = handler->list(d, buffer, rest, NULL,
69150+ prefix_size = handler->list(d, buffer, buffer ? buffer_size - used : 0, NULL,
69151 name_size, handler->flags);
69152 if (prefix_size) {
69153 if (buffer) {
69154- if (prefix_size + name_size + 1 > rest) {
69155+ if (prefix_size + name_size + 1 > buffer_size - used) {
69156 err = -ERANGE;
69157 goto failed;
69158 }
69159@@ -86,7 +86,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
69160 buffer[name_size] = '\0';
69161 buffer += name_size + 1;
69162 }
69163- rest -= prefix_size + name_size + 1;
69164+ used += prefix_size + name_size + 1;
69165 } else {
69166 			/* no handler or insufficient privileges, so skip */
69167 err = squashfs_read_metadata(sb, NULL, &start,
69168@@ -107,7 +107,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
69169 if (err < 0)
69170 goto failed;
69171 }
69172- err = buffer_size - rest;
69173+ err = used;
69174
69175 failed:
69176 return err;
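The squashfs_listxattr() rewrite replaces the counted-down size_t rest with a counted-up used and an ssize_t error, and only derives a remaining-space figure when a destination buffer actually exists. The point is unsigned arithmetic: during the size-query pass (buffer == NULL, buffer_size == 0) the old code decremented rest below zero, wrapping it to a huge value that later comparisons and the handler->list() size argument could not be trusted with. The hazard in miniature, with made-up numbers:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t buffer_size = 0;   /* size-query pass: no buffer supplied */
        size_t rest = buffer_size;
        size_t entry = 12;        /* prefix + name + NUL for one xattr */

        rest -= entry;            /* wraps: rest is now SIZE_MAX - 11 */
        printf("rest after one entry: %zu\n", rest);

        /* any later "would it fit?" comparison against rest now passes */
        printf("fits: %d\n", entry <= rest);

        /* counting up instead keeps the arithmetic in range */
        size_t used = 0;
        used += entry;
        printf("used: %zu\n", used);
        return 0;
    }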
69177diff --git a/fs/stat.c b/fs/stat.c
69178index ae0c3ce..9ee641c 100644
69179--- a/fs/stat.c
69180+++ b/fs/stat.c
69181@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
69182 stat->gid = inode->i_gid;
69183 stat->rdev = inode->i_rdev;
69184 stat->size = i_size_read(inode);
69185- stat->atime = inode->i_atime;
69186- stat->mtime = inode->i_mtime;
69187+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69188+ stat->atime = inode->i_ctime;
69189+ stat->mtime = inode->i_ctime;
69190+ } else {
69191+ stat->atime = inode->i_atime;
69192+ stat->mtime = inode->i_mtime;
69193+ }
69194 stat->ctime = inode->i_ctime;
69195 stat->blksize = (1 << inode->i_blkbits);
69196 stat->blocks = inode->i_blocks;
69197@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
69198 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
69199 {
69200 struct inode *inode = path->dentry->d_inode;
69201+ int retval;
69202
69203- if (inode->i_op->getattr)
69204- return inode->i_op->getattr(path->mnt, path->dentry, stat);
69205+ if (inode->i_op->getattr) {
69206+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
69207+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
69208+ stat->atime = stat->ctime;
69209+ stat->mtime = stat->ctime;
69210+ }
69211+ return retval;
69212+ }
69213
69214 generic_fillattr(inode, stat);
69215 return 0;
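These stat.c changes implement GRKERNSEC_DEVICE_SIDECHANNEL (see its Kconfig help later in this patch): for callers without CAP_MKNOD, a stat on a side-channel-prone device reports the create time in place of the access and modify times. The side channel being closed needs nothing more than stat(2) from user space; a hypothetical probe against /dev/ptmx:

    #include <sys/stat.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct stat st;

        /* on an unhardened kernel, pty activity updates the device
         * timestamps, so polling them leaks usage timing */
        for (;;) {
            if (stat("/dev/ptmx", &st) == 0)
                printf("atime=%ld mtime=%ld\n",
                       (long)st.st_atime, (long)st.st_mtime);
            sleep(1);
        }
        return 0;
    }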
69216diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
69217index 0b45ff4..edf9d3a 100644
69218--- a/fs/sysfs/dir.c
69219+++ b/fs/sysfs/dir.c
69220@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
69221 kfree(buf);
69222 }
69223
69224+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69225+extern int grsec_enable_sysfs_restrict;
69226+#endif
69227+
69228 /**
69229 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
69230 * @kobj: object we're creating directory for
69231@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
69232 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69233 {
69234 struct kernfs_node *parent, *kn;
69235+ const char *name;
69236+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
69237+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69238+ const char *parent_name;
69239+#endif
69240
69241 BUG_ON(!kobj);
69242
69243+ name = kobject_name(kobj);
69244+
69245 if (kobj->parent)
69246 parent = kobj->parent->sd;
69247 else
69248@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
69249 if (!parent)
69250 return -ENOENT;
69251
69252- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
69253- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
69254+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
69255+ parent_name = parent->name;
69256+ mode = S_IRWXU;
69257+
69258+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
69259+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
69260+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
69261+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
69262+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
69263+ if (!grsec_enable_sysfs_restrict)
69264+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
69265+#endif
69266+
69267+ kn = kernfs_create_dir_ns(parent, name,
69268+ mode, kobj, ns);
69269 if (IS_ERR(kn)) {
69270 if (PTR_ERR(kn) == -EEXIST)
69271- sysfs_warn_dup(parent, kobject_name(kobj));
69272+ sysfs_warn_dup(parent, name);
69273 return PTR_ERR(kn);
69274 }
69275
69276diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
69277index 69d4889..a810bd4 100644
69278--- a/fs/sysv/sysv.h
69279+++ b/fs/sysv/sysv.h
69280@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
69281 #endif
69282 }
69283
69284-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69285+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
69286 {
69287 if (sbi->s_bytesex == BYTESEX_PDP)
69288 return PDP_swab((__force __u32)n);
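The __intentional_overflow(-1) annotations here and in the ufs/xfs hunks below are consumed by PaX's size_overflow gcc plugin: they mark helpers whose arithmetic is expected to wrap (byte-order folding, division tricks) so the plugin's overflow instrumentation skips them, with -1 covering the return value. When the plugin is not in use the marker has to compile away; roughly, as a sketch (the guard name is illustrative):

    /* sketch: a plugin-recognized attribute when the size_overflow
     * plugin is active, a no-op otherwise */
    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif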
69289diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
69290index fb08b0c..65fcc7e 100644
69291--- a/fs/ubifs/io.c
69292+++ b/fs/ubifs/io.c
69293@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
69294 return err;
69295 }
69296
69297-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69298+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
69299 {
69300 int err;
69301
69302diff --git a/fs/udf/misc.c b/fs/udf/misc.c
69303index c175b4d..8f36a16 100644
69304--- a/fs/udf/misc.c
69305+++ b/fs/udf/misc.c
69306@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
69307
69308 u8 udf_tag_checksum(const struct tag *t)
69309 {
69310- u8 *data = (u8 *)t;
69311+ const u8 *data = (const u8 *)t;
69312 u8 checksum = 0;
69313 int i;
69314 for (i = 0; i < sizeof(struct tag); ++i)
69315diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
69316index 8d974c4..b82f6ec 100644
69317--- a/fs/ufs/swab.h
69318+++ b/fs/ufs/swab.h
69319@@ -22,7 +22,7 @@ enum {
69320 BYTESEX_BE
69321 };
69322
69323-static inline u64
69324+static inline u64 __intentional_overflow(-1)
69325 fs64_to_cpu(struct super_block *sbp, __fs64 n)
69326 {
69327 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
69328@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
69329 return (__force __fs64)cpu_to_be64(n);
69330 }
69331
69332-static inline u32
69333+static inline u32 __intentional_overflow(-1)
69334 fs32_to_cpu(struct super_block *sbp, __fs32 n)
69335 {
69336 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
69337diff --git a/fs/utimes.c b/fs/utimes.c
69338index aa138d6..5f3a811 100644
69339--- a/fs/utimes.c
69340+++ b/fs/utimes.c
69341@@ -1,6 +1,7 @@
69342 #include <linux/compiler.h>
69343 #include <linux/file.h>
69344 #include <linux/fs.h>
69345+#include <linux/security.h>
69346 #include <linux/linkage.h>
69347 #include <linux/mount.h>
69348 #include <linux/namei.h>
69349@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
69350 }
69351 }
69352 retry_deleg:
69353+
69354+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
69355+ error = -EACCES;
69356+ goto mnt_drop_write_and_out;
69357+ }
69358+
69359 mutex_lock(&inode->i_mutex);
69360 error = notify_change(path->dentry, &newattrs, &delegated_inode);
69361 mutex_unlock(&inode->i_mutex);
69362diff --git a/fs/xattr.c b/fs/xattr.c
69363index 4ef6985..a6cd6567 100644
69364--- a/fs/xattr.c
69365+++ b/fs/xattr.c
69366@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
69367 return rc;
69368 }
69369
69370+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
69371+ssize_t
69372+pax_getxattr(struct dentry *dentry, void *value, size_t size)
69373+{
69374+ struct inode *inode = dentry->d_inode;
69375+ ssize_t error;
69376+
69377+ error = inode_permission(inode, MAY_EXEC);
69378+ if (error)
69379+ return error;
69380+
69381+ if (inode->i_op->getxattr)
69382+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
69383+ else
69384+ error = -EOPNOTSUPP;
69385+
69386+ return error;
69387+}
69388+EXPORT_SYMBOL(pax_getxattr);
69389+#endif
69390+
69391 ssize_t
69392 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
69393 {
69394@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
69395 * Extended attribute SET operations
69396 */
69397 static long
69398-setxattr(struct dentry *d, const char __user *name, const void __user *value,
69399+setxattr(struct path *path, const char __user *name, const void __user *value,
69400 size_t size, int flags)
69401 {
69402 int error;
69403@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
69404 posix_acl_fix_xattr_from_user(kvalue, size);
69405 }
69406
69407- error = vfs_setxattr(d, kname, kvalue, size, flags);
69408+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
69409+ error = -EACCES;
69410+ goto out;
69411+ }
69412+
69413+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
69414 out:
69415 if (vvalue)
69416 vfree(vvalue);
69417@@ -376,7 +402,7 @@ retry:
69418 return error;
69419 error = mnt_want_write(path.mnt);
69420 if (!error) {
69421- error = setxattr(path.dentry, name, value, size, flags);
69422+ error = setxattr(&path, name, value, size, flags);
69423 mnt_drop_write(path.mnt);
69424 }
69425 path_put(&path);
69426@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
69427 audit_file(f.file);
69428 error = mnt_want_write_file(f.file);
69429 if (!error) {
69430- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
69431+ error = setxattr(&f.file->f_path, name, value, size, flags);
69432 mnt_drop_write_file(f.file);
69433 }
69434 fdput(f);
69435@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
69436 * Extended attribute REMOVE operations
69437 */
69438 static long
69439-removexattr(struct dentry *d, const char __user *name)
69440+removexattr(struct path *path, const char __user *name)
69441 {
69442 int error;
69443 char kname[XATTR_NAME_MAX + 1];
69444@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
69445 if (error < 0)
69446 return error;
69447
69448- return vfs_removexattr(d, kname);
69449+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
69450+ return -EACCES;
69451+
69452+ return vfs_removexattr(path->dentry, kname);
69453 }
69454
69455 static int path_removexattr(const char __user *pathname,
69456@@ -623,7 +652,7 @@ retry:
69457 return error;
69458 error = mnt_want_write(path.mnt);
69459 if (!error) {
69460- error = removexattr(path.dentry, name);
69461+ error = removexattr(&path, name);
69462 mnt_drop_write(path.mnt);
69463 }
69464 path_put(&path);
69465@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
69466 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
69467 {
69468 struct fd f = fdget(fd);
69469+ struct path *path;
69470 int error = -EBADF;
69471
69472 if (!f.file)
69473 return error;
69474+ path = &f.file->f_path;
69475 audit_file(f.file);
69476 error = mnt_want_write_file(f.file);
69477 if (!error) {
69478- error = removexattr(f.file->f_path.dentry, name);
69479+ error = removexattr(path, name);
69480 mnt_drop_write_file(f.file);
69481 }
69482 fdput(f);
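setxattr() and removexattr() are refactored from taking a bare dentry to taking a struct path because the grsecurity RBAC hooks need the vfsmount as well as the dentry to resolve the object against the loaded policy. Judging from the call sites, the hooks follow the usual grsecurity convention of returning nonzero to allow and zero to deny; a stub consistent with that, where gr_acl_is_enabled() and gr_check_write_access() are invented placeholder names:

    #include <linux/dcache.h>
    #include <linux/mount.h>

    /* invented placeholders standing in for RBAC internals */
    extern int gr_acl_is_enabled(void);
    extern int gr_check_write_access(struct dentry *dentry, struct vfsmount *mnt);

    int gr_acl_handle_setxattr(struct dentry *dentry, struct vfsmount *mnt)
    {
        if (!gr_acl_is_enabled())
            return 1;	/* RBAC inactive: allow everything */

        /* look up <mnt,dentry> in the loaded policy; nonzero == allow */
        return gr_check_write_access(dentry, mnt);
    }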
69483diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
69484index 61ec015..7c18807 100644
69485--- a/fs/xfs/libxfs/xfs_bmap.c
69486+++ b/fs/xfs/libxfs/xfs_bmap.c
69487@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
69488
69489 #else
69490 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
69491-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
69492+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
69493 #endif /* DEBUG */
69494
69495 /*
69496diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
69497index 098cd78..724d3f8 100644
69498--- a/fs/xfs/xfs_dir2_readdir.c
69499+++ b/fs/xfs/xfs_dir2_readdir.c
69500@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
69501 ino = dp->d_ops->sf_get_ino(sfp, sfep);
69502 filetype = dp->d_ops->sf_get_ftype(sfep);
69503 ctx->pos = off & 0x7fffffff;
69504- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69505+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
69506+ char name[sfep->namelen];
69507+ memcpy(name, sfep->name, sfep->namelen);
69508+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
69509+ return 0;
69510+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
69511 xfs_dir3_get_dtype(dp->i_mount, filetype)))
69512 return 0;
69513 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
69514diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
69515index ac4feae..386d551 100644
69516--- a/fs/xfs/xfs_ioctl.c
69517+++ b/fs/xfs/xfs_ioctl.c
69518@@ -120,7 +120,7 @@ xfs_find_handle(
69519 }
69520
69521 error = -EFAULT;
69522- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
69523+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
69524 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
69525 goto out_put;
69526
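The xfs_find_handle() change is a bounds clamp before copy_to_user(): rejecting hsize > sizeof handle guarantees the copy can never read past the on-stack xfs_handle and leak adjacent kernel stack to user space. The general pattern, as a sketch with invented names:

    #include <linux/uaccess.h>

    /* sketch: never let a length userspace can influence exceed the
     * size of the kernel object being copied out */
    static long copy_out_bounded(void __user *dst, const void *obj,
                                 size_t obj_size, size_t req_size)
    {
        if (req_size > obj_size)
            return -EINVAL;           /* or clamp: req_size = obj_size */
        if (copy_to_user(dst, obj, req_size))
            return -EFAULT;
        return 0;
    }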
69527diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
69528index c31d2c2..6ec8f62 100644
69529--- a/fs/xfs/xfs_linux.h
69530+++ b/fs/xfs/xfs_linux.h
69531@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
69532 * of the compiler which do not like us using do_div in the middle
69533 * of large functions.
69534 */
69535-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
69536+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
69537 {
69538 __u32 mod;
69539
69540@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
69541 return 0;
69542 }
69543 #else
69544-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
69545+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
69546 {
69547 __u32 mod;
69548
69549diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
69550new file mode 100644
69551index 0000000..31f8fe4
69552--- /dev/null
69553+++ b/grsecurity/Kconfig
69554@@ -0,0 +1,1182 @@
69555+#
69556+# grsecurity configuration
69557+#
69558+menu "Memory Protections"
69559+depends on GRKERNSEC
69560+
69561+config GRKERNSEC_KMEM
69562+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
69563+ default y if GRKERNSEC_CONFIG_AUTO
69564+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
69565+ help
69566+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
69567+ be written to or read from to modify or leak the contents of the running
69568+ kernel. /dev/port will also not be allowed to be opened, writing to
69569+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
69570+ If you have module support disabled, enabling this will close up several
69571+ ways that are currently used to insert malicious code into the running
69572+ kernel.
69573+
69574+ Even with this feature enabled, we still highly recommend that
69575+ you use the RBAC system, as it is still possible for an attacker to
69576+ modify the running kernel through other more obscure methods.
69577+
69578+ It is highly recommended that you say Y here if you meet all the
69579+ conditions above.
69580+
69581+config GRKERNSEC_VM86
69582+ bool "Restrict VM86 mode"
69583+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69584+ depends on X86_32
69585+
69586+ help
69587+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
69588+ make use of a special execution mode on 32bit x86 processors called
69589+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
69590+ video cards and will still work with this option enabled. The purpose
69591+ of the option is to prevent exploitation of emulation errors in
69592+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
69593+ Nearly all users should be able to enable this option.
69594+
69595+config GRKERNSEC_IO
69596+ bool "Disable privileged I/O"
69597+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69598+ depends on X86
69599+ select RTC_CLASS
69600+ select RTC_INTF_DEV
69601+ select RTC_DRV_CMOS
69602+
69603+ help
69604+ If you say Y here, all ioperm and iopl calls will return an error.
69605+ Ioperm and iopl can be used to modify the running kernel.
69606+	  Unfortunately, some programs need this access to operate properly;
69607+	  the most notable are XFree86 and hwclock.  hwclock can be
69608+ remedied by having RTC support in the kernel, so real-time
69609+ clock support is enabled if this option is enabled, to ensure
69610+ that hwclock operates correctly. If hwclock still does not work,
69611+ either update udev or symlink /dev/rtc to /dev/rtc0.
69612+
69613+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
69614+ you may not be able to boot into a graphical environment with this
69615+ option enabled. In this case, you should use the RBAC system instead.
69616+
69617+config GRKERNSEC_BPF_HARDEN
69618+ bool "Harden BPF interpreter"
69619+ default y if GRKERNSEC_CONFIG_AUTO
69620+ help
69621+ Unlike previous versions of grsecurity that hardened both the BPF
69622+ interpreted code against corruption at rest as well as the JIT code
69623+ against JIT-spray attacks and attacker-controlled immediate values
69624+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
69625+ and will ensure the interpreted code is read-only at rest. This feature
69626+ may be removed at a later time when eBPF stabilizes to entirely revert
69627+ back to the more secure pre-3.16 BPF interpreter/JIT.
69628+
69629+ If you're using KERNEXEC, it's recommended that you enable this option
69630+ to supplement the hardening of the kernel.
69631+
69632+config GRKERNSEC_PERF_HARDEN
69633+ bool "Disable unprivileged PERF_EVENTS usage by default"
69634+ default y if GRKERNSEC_CONFIG_AUTO
69635+ depends on PERF_EVENTS
69636+ help
69637+ If you say Y here, the range of acceptable values for the
69638+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
69639+ default to a new value: 3. When the sysctl is set to this value, no
69640+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
69641+
69642+ Though PERF_EVENTS can be used legitimately for performance monitoring
69643+ and low-level application profiling, it is forced on regardless of
69644+ configuration, has been at fault for several vulnerabilities, and
69645+ creates new opportunities for side channels and other information leaks.
69646+
69647+ This feature puts PERF_EVENTS into a secure default state and permits
69648+ the administrator to change out of it temporarily if unprivileged
69649+ application profiling is needed.
69650+
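At the new maximum paranoid level 3 introduced by this option, perf_event_open(2) fails outright for unprivileged callers instead of merely narrowing its scope. A small, hypothetical user-space probe demonstrating the denial:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);
        attr.type   = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        /* measure ourselves, any CPU */
        long fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
            perror("perf_event_open"); /* expect a permission error at level 3 */
        return 0;
    }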
69651+config GRKERNSEC_RAND_THREADSTACK
69652+ bool "Insert random gaps between thread stacks"
69653+ default y if GRKERNSEC_CONFIG_AUTO
69654+ depends on PAX_RANDMMAP && !PPC
69655+ help
69656+ If you say Y here, a random-sized gap will be enforced between allocated
69657+ thread stacks. Glibc's NPTL and other threading libraries that
69658+ pass MAP_STACK to the kernel for thread stack allocation are supported.
69659+ The implementation currently provides 8 bits of entropy for the gap.
69660+
69661+ Many distributions do not compile threaded remote services with the
69662+ -fstack-check argument to GCC, causing the variable-sized stack-based
69663+ allocator, alloca(), to not probe the stack on allocation. This
69664+ permits an unbounded alloca() to skip over any guard page and potentially
69665+ modify another thread's stack reliably. An enforced random gap
69666+ reduces the reliability of such an attack and increases the chance
69667+ that such a read/write to another thread's stack instead lands in
69668+ an unmapped area, causing a crash and triggering grsecurity's
69669+ anti-bruteforcing logic.
69670+
69671+config GRKERNSEC_PROC_MEMMAP
69672+ bool "Harden ASLR against information leaks and entropy reduction"
69673+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
69674+ depends on PAX_NOEXEC || PAX_ASLR
69675+ help
69676+	  If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
69677+	  give no information about the addresses of the task's mappings if
69678+	  PaX features that rely on random addresses are enabled on the task.
69679+	  In addition to sanitizing this information and disabling other
69680+	  dangerous sources of information, this option causes reads of sensitive
69681+	  /proc/<pid> entries to be denied where the file descriptor was opened
69682+	  in a different task than the one performing the read.  Such attempts are logged.
69683+ This option also limits argv/env strings for suid/sgid binaries
69684+ to 512KB to prevent a complete exhaustion of the stack entropy provided
69685+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
69686+ binaries to prevent alternative mmap layouts from being abused.
69687+
69688+ If you use PaX it is essential that you say Y here as it closes up
69689+ several holes that make full ASLR useless locally.
69690+
69691+
69692+config GRKERNSEC_KSTACKOVERFLOW
69693+ bool "Prevent kernel stack overflows"
69694+ default y if GRKERNSEC_CONFIG_AUTO
69695+ depends on !IA64 && 64BIT
69696+ help
69697+ If you say Y here, the kernel's process stacks will be allocated
69698+ with vmalloc instead of the kernel's default allocator. This
69699+ introduces guard pages that in combination with the alloca checking
69700+ of the STACKLEAK feature prevents all forms of kernel process stack
69701+ overflow abuse. Note that this is different from kernel stack
69702+ buffer overflows.
69703+
69704+config GRKERNSEC_BRUTE
69705+ bool "Deter exploit bruteforcing"
69706+ default y if GRKERNSEC_CONFIG_AUTO
69707+ help
69708+ If you say Y here, attempts to bruteforce exploits against forking
69709+ daemons such as apache or sshd, as well as against suid/sgid binaries
69710+ will be deterred. When a child of a forking daemon is killed by PaX
69711+ or crashes due to an illegal instruction or other suspicious signal,
69712+ the parent process will be delayed 30 seconds upon every subsequent
69713+ fork until the administrator is able to assess the situation and
69714+ restart the daemon.
69715+ In the suid/sgid case, the attempt is logged, the user has all their
69716+ existing instances of the suid/sgid binary terminated and will
69717+ be unable to execute any suid/sgid binaries for 15 minutes.
69718+
69719+ It is recommended that you also enable signal logging in the auditing
69720+ section so that logs are generated when a process triggers a suspicious
69721+ signal.
69722+ If the sysctl option is enabled, a sysctl option with name
69723+ "deter_bruteforce" is created.
69724+
69725+config GRKERNSEC_MODHARDEN
69726+ bool "Harden module auto-loading"
69727+ default y if GRKERNSEC_CONFIG_AUTO
69728+ depends on MODULES
69729+ help
69730+ If you say Y here, module auto-loading in response to use of some
69731+ feature implemented by an unloaded module will be restricted to
69732+ root users. Enabling this option helps defend against attacks
69733+ by unprivileged users who abuse the auto-loading behavior to
69734+ cause a vulnerable module to load that is then exploited.
69735+
69736+ If this option prevents a legitimate use of auto-loading for a
69737+ non-root user, the administrator can execute modprobe manually
69738+ with the exact name of the module mentioned in the alert log.
69739+ Alternatively, the administrator can add the module to the list
69740+ of modules loaded at boot by modifying init scripts.
69741+
69742+ Modification of init scripts will most likely be needed on
69743+ Ubuntu servers with encrypted home directory support enabled,
69744+ as the first non-root user logging in will cause the ecb(aes),
69745+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
69746+
69747+config GRKERNSEC_HIDESYM
69748+ bool "Hide kernel symbols"
69749+ default y if GRKERNSEC_CONFIG_AUTO
69750+ select PAX_USERCOPY_SLABS
69751+ help
69752+ If you say Y here, getting information on loaded modules, and
69753+ displaying all kernel symbols through a syscall will be restricted
69754+ to users with CAP_SYS_MODULE. For software compatibility reasons,
69755+ /proc/kallsyms will be restricted to the root user. The RBAC
69756+ system can hide that entry even from root.
69757+
69758+ This option also prevents leaking of kernel addresses through
69759+ several /proc entries.
69760+
69761+ Note that this option is only effective provided the following
69762+ conditions are met:
69763+ 1) The kernel using grsecurity is not precompiled by some distribution
69764+ 2) You have also enabled GRKERNSEC_DMESG
69765+ 3) You are using the RBAC system and hiding other files such as your
69766+ kernel image and System.map. Alternatively, enabling this option
69767+ causes the permissions on /boot, /lib/modules, and the kernel
69768+ source directory to change at compile time to prevent
69769+ reading by non-root users.
69770+ If the above conditions are met, this option will aid in providing a
69771+ useful protection against local kernel exploitation of overflows
69772+ and arbitrary read/write vulnerabilities.
69773+
69774+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
69775+ in addition to this feature.
69776+
69777+config GRKERNSEC_RANDSTRUCT
69778+ bool "Randomize layout of sensitive kernel structures"
69779+ default y if GRKERNSEC_CONFIG_AUTO
69780+ select GRKERNSEC_HIDESYM
69781+ select MODVERSIONS if MODULES
69782+ help
69783+ If you say Y here, the layouts of a number of sensitive kernel
69784+ structures (task, fs, cred, etc) and all structures composed entirely
69785+ of function pointers (aka "ops" structs) will be randomized at compile-time.
69786+ This can introduce the requirement of an additional infoleak
69787+ vulnerability for exploits targeting these structure types.
69788+
69789+ Enabling this feature will introduce some performance impact, slightly
69790+ increase memory usage, and prevent the use of forensic tools like
69791+	  Volatility against the system (unless the kernel source tree, which
69792+	  contains the randomization seed, is left on the system after installation).
69793+
69794+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
69795+ It remains after a make clean to allow for external modules to be compiled
69796+ with the existing seed and will be removed by a make mrproper or
69797+ make distclean.
69798+
69799+	  Note that the implementation requires gcc 4.6.4 or newer.  You may need
69800+ to install the supporting headers explicitly in addition to the normal
69801+ gcc package.
69802+
69803+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
69804+ bool "Use cacheline-aware structure randomization"
69805+ depends on GRKERNSEC_RANDSTRUCT
69806+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
69807+ help
69808+ If you say Y here, the RANDSTRUCT randomization will make a best effort
69809+ at restricting randomization to cacheline-sized groups of elements. It
69810+ will further not randomize bitfields in structures. This reduces the
69811+ performance hit of RANDSTRUCT at the cost of weakened randomization.
69812+
69813+config GRKERNSEC_KERN_LOCKOUT
69814+ bool "Active kernel exploit response"
69815+ default y if GRKERNSEC_CONFIG_AUTO
69816+ depends on X86 || ARM || PPC || SPARC
69817+ help
69818+ If you say Y here, when a PaX alert is triggered due to suspicious
69819+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
69820+ or an OOPS occurs due to bad memory accesses, instead of just
69821+ terminating the offending process (and potentially allowing
69822+ a subsequent exploit from the same user), we will take one of two
69823+ actions:
69824+ If the user was root, we will panic the system
69825+ If the user was non-root, we will log the attempt, terminate
69826+ all processes owned by the user, then prevent them from creating
69827+ any new processes until the system is restarted
69828+ This deters repeated kernel exploitation/bruteforcing attempts
69829+ and is useful for later forensics.
69830+
69831+config GRKERNSEC_OLD_ARM_USERLAND
69832+ bool "Old ARM userland compatibility"
69833+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
69834+ help
69835+ If you say Y here, stubs of executable code to perform such operations
69836+ as "compare-exchange" will be placed at fixed locations in the ARM vector
69837+ table. This is unfortunately needed for old ARM userland meant to run
69838+ across a wide range of processors. Without this option enabled,
69839+ the get_tls and data memory barrier stubs will be emulated by the kernel,
69840+ which is enough for Linaro userlands or other userlands designed for v6
69841+ and newer ARM CPUs. It's recommended that you try without this option enabled
69842+ first, and only enable it if your userland does not boot (it will likely fail
69843+ at init time).
69844+
69845+endmenu
69846+menu "Role Based Access Control Options"
69847+depends on GRKERNSEC
69848+
69849+config GRKERNSEC_RBAC_DEBUG
69850+ bool
69851+
69852+config GRKERNSEC_NO_RBAC
69853+ bool "Disable RBAC system"
69854+ help
69855+ If you say Y here, the /dev/grsec device will be removed from the kernel,
69856+ preventing the RBAC system from being enabled. You should only say Y
69857+ here if you have no intention of using the RBAC system, so as to prevent
69858+ an attacker with root access from misusing the RBAC system to hide files
69859+ and processes when loadable module support and /dev/[k]mem have been
69860+ locked down.
69861+
69862+config GRKERNSEC_ACL_HIDEKERN
69863+ bool "Hide kernel processes"
69864+ help
69865+ If you say Y here, all kernel threads will be hidden to all
69866+ processes but those whose subject has the "view hidden processes"
69867+ flag.
69868+
69869+config GRKERNSEC_ACL_MAXTRIES
69870+ int "Maximum tries before password lockout"
69871+ default 3
69872+ help
69873+ This option enforces the maximum number of times a user can attempt
69874+ to authorize themselves with the grsecurity RBAC system before being
69875+ denied the ability to attempt authorization again for a specified time.
69876+ The lower the number, the harder it will be to brute-force a password.
69877+
69878+config GRKERNSEC_ACL_TIMEOUT
69879+ int "Time to wait after max password tries, in seconds"
69880+ default 30
69881+ help
69882+ This option specifies the time the user must wait after attempting to
69883+ authorize to the RBAC system with the maximum number of invalid
69884+ passwords. The higher the number, the harder it will be to brute-force
69885+ a password.
69886+
69887+endmenu
69888+menu "Filesystem Protections"
69889+depends on GRKERNSEC
69890+
69891+config GRKERNSEC_PROC
69892+ bool "Proc restrictions"
69893+ default y if GRKERNSEC_CONFIG_AUTO
69894+ help
69895+ If you say Y here, the permissions of the /proc filesystem
69896+ will be altered to enhance system security and privacy. You MUST
69897+ choose either a user only restriction or a user and group restriction.
69898+	  Depending upon the option you choose, users will either be restricted
69899+	  to seeing only the processes they themselves run, or a group that you
69900+	  specify will additionally be able to view all processes and files
69901+	  normally restricted to root.  NOTE: If you're running identd or
69902+ ntpd as a non-root user, you will have to run it as the group you
69903+ specify here.
69904+
69905+config GRKERNSEC_PROC_USER
69906+ bool "Restrict /proc to user only"
69907+ depends on GRKERNSEC_PROC
69908+ help
69909+ If you say Y here, non-root users will only be able to view their own
69910+	  processes, and will be restricted from viewing network-related
69911+	  information and kernel symbol and module information.
69912+
69913+config GRKERNSEC_PROC_USERGROUP
69914+ bool "Allow special group"
69915+ default y if GRKERNSEC_CONFIG_AUTO
69916+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
69917+ help
69918+ If you say Y here, you will be able to select a group that will be
69919+ able to view all processes and network-related information. If you've
69920+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
69921+ remain hidden. This option is useful if you want to run identd as
69922+ a non-root user. The group you select may also be chosen at boot time
69923+ via "grsec_proc_gid=" on the kernel commandline.
69924+
69925+config GRKERNSEC_PROC_GID
69926+ int "GID for special group"
69927+ depends on GRKERNSEC_PROC_USERGROUP
69928+ default 1001
69929+
69930+config GRKERNSEC_PROC_ADD
69931+ bool "Additional restrictions"
69932+ default y if GRKERNSEC_CONFIG_AUTO
69933+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
69934+ help
69935+ If you say Y here, additional restrictions will be placed on
69936+ /proc that keep normal users from viewing device information and
69937+ slabinfo information that could be useful for exploits.
69938+
69939+config GRKERNSEC_LINK
69940+ bool "Linking restrictions"
69941+ default y if GRKERNSEC_CONFIG_AUTO
69942+ help
69943+ If you say Y here, /tmp race exploits will be prevented, since users
69944+ will no longer be able to follow symlinks owned by other users in
69945+ world-writable +t directories (e.g. /tmp), unless the owner of the
69946+	  symlink is the owner of the directory.  Users will also not be
69947+ able to hardlink to files they do not own. If the sysctl option is
69948+ enabled, a sysctl option with name "linking_restrictions" is created.
69949+
69950+config GRKERNSEC_SYMLINKOWN
69951+ bool "Kernel-enforced SymlinksIfOwnerMatch"
69952+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69953+ help
69954+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
69955+ that prevents it from being used as a security feature. As Apache
69956+ verifies the symlink by performing a stat() against the target of
69957+ the symlink before it is followed, an attacker can setup a symlink
69958+ to point to a same-owned file, then replace the symlink with one
69959+ that targets another user's file just after Apache "validates" the
69960+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
69961+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
69962+ will be in place for the group you specify. If the sysctl option
69963+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
69964+ created.
69965+
69966+config GRKERNSEC_SYMLINKOWN_GID
69967+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
69968+ depends on GRKERNSEC_SYMLINKOWN
69969+ default 1006
69970+ help
69971+ Setting this GID determines what group kernel-enforced
69972+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
69973+ is enabled, a sysctl option with name "symlinkown_gid" is created.
69974+
69975+config GRKERNSEC_FIFO
69976+ bool "FIFO restrictions"
69977+ default y if GRKERNSEC_CONFIG_AUTO
69978+ help
69979+ If you say Y here, users will not be able to write to FIFOs they don't
69980+ own in world-writable +t directories (e.g. /tmp), unless the owner of
69981+	  the FIFO is also the owner of the directory it resides in.  If the sysctl
69982+ option is enabled, a sysctl option with name "fifo_restrictions" is
69983+ created.
69984+
69985+config GRKERNSEC_SYSFS_RESTRICT
69986+ bool "Sysfs/debugfs restriction"
69987+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69988+ depends on SYSFS
69989+ help
69990+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
69991+ any filesystem normally mounted under it (e.g. debugfs) will be
69992+ mostly accessible only by root. These filesystems generally provide access
69993+ to hardware and debug information that isn't appropriate for unprivileged
69994+ users of the system. Sysfs and debugfs have also become a large source
69995+ of new vulnerabilities, ranging from infoleaks to local compromise.
69996+ There has been very little oversight with an eye toward security involved
69997+ in adding new exporters of information to these filesystems, so their
69998+ use is discouraged.
69999+ For reasons of compatibility, a few directories have been whitelisted
70000+ for access by non-root users:
70001+ /sys/fs/selinux
70002+ /sys/fs/fuse
70003+ /sys/devices/system/cpu
70004+
70005+config GRKERNSEC_ROFS
70006+ bool "Runtime read-only mount protection"
70007+ depends on SYSCTL
70008+ help
70009+ If you say Y here, a sysctl option with name "romount_protect" will
70010+ be created. By setting this option to 1 at runtime, filesystems
70011+ will be protected in the following ways:
70012+ * No new writable mounts will be allowed
70013+ * Existing read-only mounts won't be able to be remounted read/write
70014+ * Write operations will be denied on all block devices
70015+ This option acts independently of grsec_lock: once it is set to 1,
70016+ it cannot be turned off. Therefore, please be mindful of the resulting
70017+ behavior if this option is enabled in an init script on a read-only
70018+ filesystem.
70019+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
70020+ and GRKERNSEC_IO should be enabled and module loading disabled via
70021+ config or at runtime.
70022+ This feature is mainly intended for secure embedded systems.
70023+
70024+
70025+config GRKERNSEC_DEVICE_SIDECHANNEL
70026+ bool "Eliminate stat/notify-based device sidechannels"
70027+ default y if GRKERNSEC_CONFIG_AUTO
70028+ help
70029+ If you say Y here, timing analyses on block or character
70030+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
70031+ will be thwarted for unprivileged users. If a process without
70032+ CAP_MKNOD stats such a device, the last access and last modify times
70033+ will match the device's create time. No access or modify events
70034+ will be triggered through inotify/dnotify/fanotify for such devices.
70035+ This feature will prevent attacks that may at a minimum
70036+ allow an attacker to determine the administrator's password length.
70037+
70038+config GRKERNSEC_CHROOT
70039+ bool "Chroot jail restrictions"
70040+ default y if GRKERNSEC_CONFIG_AUTO
70041+ help
70042+ If you say Y here, you will be able to choose several options that will
70043+ make breaking out of a chrooted jail much more difficult. If you
70044+ encounter no software incompatibilities with the following options, it
70045+ is recommended that you enable each one.
70046+
70047+ Note that the chroot restrictions are not intended to apply to "chroots"
70048+ to directories that are simple bind mounts of the global root filesystem.
70049+ For several other reasons, a user shouldn't expect any significant
70050+ security by performing such a chroot.
70051+
70052+config GRKERNSEC_CHROOT_MOUNT
70053+ bool "Deny mounts"
70054+ default y if GRKERNSEC_CONFIG_AUTO
70055+ depends on GRKERNSEC_CHROOT
70056+ help
70057+ If you say Y here, processes inside a chroot will not be able to
70058+ mount or remount filesystems. If the sysctl option is enabled, a
70059+ sysctl option with name "chroot_deny_mount" is created.
70060+
70061+config GRKERNSEC_CHROOT_DOUBLE
70062+ bool "Deny double-chroots"
70063+ default y if GRKERNSEC_CONFIG_AUTO
70064+ depends on GRKERNSEC_CHROOT
70065+ help
70066+ If you say Y here, processes inside a chroot will not be able to chroot
70067+ again outside the chroot. This is a widely used method of breaking
70068+ out of a chroot jail and should not be allowed. If the sysctl
70069+ option is enabled, a sysctl option with name
70070+ "chroot_deny_chroot" is created.
70071+
70072+config GRKERNSEC_CHROOT_PIVOT
70073+ bool "Deny pivot_root in chroot"
70074+ default y if GRKERNSEC_CONFIG_AUTO
70075+ depends on GRKERNSEC_CHROOT
70076+ help
70077+ If you say Y here, processes inside a chroot will not be able to use
70078+ a function called pivot_root() that was introduced in Linux 2.3.41. It
70079+ works similar to chroot in that it changes the root filesystem. This
70080+	  works similarly to chroot in that it changes the root filesystem.  This
70081+ of the chroot, and therefore should not be allowed. If the sysctl
70082+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
70083+ created.
70084+
70085+config GRKERNSEC_CHROOT_CHDIR
70086+ bool "Enforce chdir(\"/\") on all chroots"
70087+ default y if GRKERNSEC_CONFIG_AUTO
70088+ depends on GRKERNSEC_CHROOT
70089+ help
70090+ If you say Y here, the current working directory of all newly-chrooted
70091+	  applications will be set to the root directory of the chroot.
70092+ The man page on chroot(2) states:
70093+ Note that this call does not change the current working
70094+ directory, so that `.' can be outside the tree rooted at
70095+ `/'. In particular, the super-user can escape from a
70096+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
70097+
70098+ It is recommended that you say Y here, since it's not known to break
70099+ any software. If the sysctl option is enabled, a sysctl option with
70100+ name "chroot_enforce_chdir" is created.
70101+
70102+config GRKERNSEC_CHROOT_CHMOD
70103+ bool "Deny (f)chmod +s"
70104+ default y if GRKERNSEC_CONFIG_AUTO
70105+ depends on GRKERNSEC_CHROOT
70106+ help
70107+ If you say Y here, processes inside a chroot will not be able to chmod
70108+ or fchmod files to make them have suid or sgid bits. This protects
70109+ against another published method of breaking a chroot. If the sysctl
70110+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
70111+ created.
70112+
70113+config GRKERNSEC_CHROOT_FCHDIR
70114+ bool "Deny fchdir and fhandle out of chroot"
70115+ default y if GRKERNSEC_CONFIG_AUTO
70116+ depends on GRKERNSEC_CHROOT
70117+ help
70118+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
70119+ to a file descriptor of the chrooting process that points to a directory
70120+ outside the filesystem will be stopped. Additionally, this option prevents
70121+ use of the recently-created syscall for opening files by a guessable "file
70122+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
70123+ with name "chroot_deny_fchdir" is created.
70124+
70125+config GRKERNSEC_CHROOT_MKNOD
70126+ bool "Deny mknod"
70127+ default y if GRKERNSEC_CONFIG_AUTO
70128+ depends on GRKERNSEC_CHROOT
70129+ help
70130+ If you say Y here, processes inside a chroot will not be allowed to
70131+ mknod. The problem with using mknod inside a chroot is that it
70132+ would allow an attacker to create a device entry that is the same
70133+	  as one on the physical root of your system, which could be anything
70134+	  from the console device to a device for your hard drive (which
70135+ they could then use to wipe the drive or steal data). It is recommended
70136+ that you say Y here, unless you run into software incompatibilities.
70137+ If the sysctl option is enabled, a sysctl option with name
70138+ "chroot_deny_mknod" is created.
70139+
70140+config GRKERNSEC_CHROOT_SHMAT
70141+ bool "Deny shmat() out of chroot"
70142+ default y if GRKERNSEC_CONFIG_AUTO
70143+ depends on GRKERNSEC_CHROOT
70144+ help
70145+ If you say Y here, processes inside a chroot will not be able to attach
70146+ to shared memory segments that were created outside of the chroot jail.
70147+ It is recommended that you say Y here. If the sysctl option is enabled,
70148+ a sysctl option with name "chroot_deny_shmat" is created.
70149+
70150+config GRKERNSEC_CHROOT_UNIX
70151+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
70152+ default y if GRKERNSEC_CONFIG_AUTO
70153+ depends on GRKERNSEC_CHROOT
70154+ help
70155+ If you say Y here, processes inside a chroot will not be able to
70156+ connect to abstract (meaning not belonging to a filesystem) Unix
70157+ domain sockets that were bound outside of a chroot. It is recommended
70158+ that you say Y here. If the sysctl option is enabled, a sysctl option
70159+ with name "chroot_deny_unix" is created.
70160+
70161+config GRKERNSEC_CHROOT_FINDTASK
70162+ bool "Protect outside processes"
70163+ default y if GRKERNSEC_CONFIG_AUTO
70164+ depends on GRKERNSEC_CHROOT
70165+ help
70166+ If you say Y here, processes inside a chroot will not be able to
70167+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
70168+ getsid, or view any process outside of the chroot. If the sysctl
70169+ option is enabled, a sysctl option with name "chroot_findtask" is
70170+ created.
70171+
70172+config GRKERNSEC_CHROOT_NICE
70173+ bool "Restrict priority changes"
70174+ default y if GRKERNSEC_CONFIG_AUTO
70175+ depends on GRKERNSEC_CHROOT
70176+ help
70177+ If you say Y here, processes inside a chroot will not be able to raise
70178+ the priority of processes in the chroot, or alter the priority of
70179+ processes outside the chroot. This provides more security than simply
70180+ removing CAP_SYS_NICE from the process' capability set. If the
70181+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
70182+ is created.
70183+
70184+config GRKERNSEC_CHROOT_SYSCTL
70185+ bool "Deny sysctl writes"
70186+ default y if GRKERNSEC_CONFIG_AUTO
70187+ depends on GRKERNSEC_CHROOT
70188+ help
70189+ If you say Y here, an attacker in a chroot will not be able to
70190+ write to sysctl entries, either by sysctl(2) or through a /proc
70191+ interface. It is strongly recommended that you say Y here. If the
70192+ sysctl option is enabled, a sysctl option with name
70193+ "chroot_deny_sysctl" is created.
70194+
70195+config GRKERNSEC_CHROOT_RENAME
70196+ bool "Deny bad renames"
70197+ default y if GRKERNSEC_CONFIG_AUTO
70198+ depends on GRKERNSEC_CHROOT
70199+ help
70200+ If you say Y here, an attacker in a chroot will not be able to
70201+ abuse the ability to create double chroots to break out of the
70202+ chroot by exploiting a race condition between a rename of a directory
70203+ within a chroot and an open of a symlink with relative path
70204+ components. This feature will likewise prevent an accomplice outside
70205+ a chroot from enabling a user inside the chroot to break out and make
70206+ use of their credentials on the global filesystem. Enabling this
70207+ feature is essential to prevent root users from breaking out of a
70208+ chroot. If the sysctl option is enabled, a sysctl option with name
70209+ "chroot_deny_bad_rename" is created.
70210+
70211+config GRKERNSEC_CHROOT_CAPS
70212+ bool "Capability restrictions"
70213+ default y if GRKERNSEC_CONFIG_AUTO
70214+ depends on GRKERNSEC_CHROOT
70215+ help
70216+ If you say Y here, the capabilities on all processes within a
70217+ chroot jail will be lowered to stop module insertion, raw I/O,
70218+ system and net admin tasks, rebooting the system, modifying immutable
70219+ files, modifying IPC owned by another, and changing the system time.
70220+ This is left an option because it can break some apps. Disable this
70221+ if your chrooted apps are having problems performing those kinds of
70222+ tasks. If the sysctl option is enabled, a sysctl option with
70223+ name "chroot_caps" is created.
70224+
70225+config GRKERNSEC_CHROOT_INITRD
70226+ bool "Exempt initrd tasks from restrictions"
70227+ default y if GRKERNSEC_CONFIG_AUTO
70228+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
70229+ help
70230+ If you say Y here, tasks started prior to init will be exempted from
70231+ grsecurity's chroot restrictions. This option is mainly meant to
70232+ resolve the issue of Plymouth performing privileged operations unnecessarily
70233+ in a chroot.
70234+
70235+endmenu
70236+menu "Kernel Auditing"
70237+depends on GRKERNSEC
70238+
70239+config GRKERNSEC_AUDIT_GROUP
70240+ bool "Single group for auditing"
70241+ help
70242+ If you say Y here, the exec and chdir logging features will only operate
70243+ on a group you specify. This option is recommended if you only want to
70244+ watch certain users instead of having a large amount of logs from the
70245+ entire system. If the sysctl option is enabled, a sysctl option with
70246+ name "audit_group" is created.
70247+
70248+config GRKERNSEC_AUDIT_GID
70249+ int "GID for auditing"
70250+ depends on GRKERNSEC_AUDIT_GROUP
70251+ default 1007
70252+
70253+config GRKERNSEC_EXECLOG
70254+ bool "Exec logging"
70255+ help
70256+ If you say Y here, all execve() calls will be logged (since the
70257+ other exec*() calls are frontends to execve(), all execution
70258+ will be logged). Useful for shell-servers that like to keep track
70259+ of their users. If the sysctl option is enabled, a sysctl option with
70260+ name "exec_logging" is created.
70261+ WARNING: This option when enabled will produce a LOT of logs, especially
70262+ on an active system.
70263+
70264+config GRKERNSEC_RESLOG
70265+ bool "Resource logging"
70266+ default y if GRKERNSEC_CONFIG_AUTO
70267+ help
70268+ If you say Y here, all attempts to overstep resource limits will
70269+ be logged with the resource name, the requested size, and the current
70270+ limit. It is highly recommended that you say Y here. If the sysctl
70271+ option is enabled, a sysctl option with name "resource_logging" is
70272+ created. If the RBAC system is enabled, the sysctl value is ignored.
70273+
70274+config GRKERNSEC_CHROOT_EXECLOG
70275+ bool "Log execs within chroot"
70276+ help
70277+ If you say Y here, all executions inside a chroot jail will be logged
70278+ to syslog. This can cause a large amount of logs if certain
70279+ applications (e.g. djb's daemontools) are installed on the system, and
70280+ is therefore left as an option. If the sysctl option is enabled, a
70281+ sysctl option with name "chroot_execlog" is created.
70282+
70283+config GRKERNSEC_AUDIT_PTRACE
70284+ bool "Ptrace logging"
70285+ help
70286+ If you say Y here, all attempts to attach to a process via ptrace
70287+ will be logged. If the sysctl option is enabled, a sysctl option
70288+ with name "audit_ptrace" is created.
70289+
70290+config GRKERNSEC_AUDIT_CHDIR
70291+ bool "Chdir logging"
70292+ help
70293+ If you say Y here, all chdir() calls will be logged. If the sysctl
70294+ option is enabled, a sysctl option with name "audit_chdir" is created.
70295+
70296+config GRKERNSEC_AUDIT_MOUNT
70297+ bool "(Un)Mount logging"
70298+ help
70299+ If you say Y here, all mounts and unmounts will be logged. If the
70300+ sysctl option is enabled, a sysctl option with name "audit_mount" is
70301+ created.
70302+
70303+config GRKERNSEC_SIGNAL
70304+ bool "Signal logging"
70305+ default y if GRKERNSEC_CONFIG_AUTO
70306+ help
70307+ If you say Y here, certain important signals will be logged, such as
70308+ SIGSEGV, which will as a result inform you when an error in a program
70309+ occurred, which in some cases could indicate a possible exploit attempt.
70310+ If the sysctl option is enabled, a sysctl option with name
70311+ "signal_logging" is created.
70312+
70313+config GRKERNSEC_FORKFAIL
70314+ bool "Fork failure logging"
70315+ help
70316+ If you say Y here, all failed fork() attempts will be logged.
70317+ This could suggest a fork bomb, or someone attempting to overstep
70318+ their process limit. If the sysctl option is enabled, a sysctl option
70319+ with name "forkfail_logging" is created.
70320+
70321+config GRKERNSEC_TIME
70322+ bool "Time change logging"
70323+ default y if GRKERNSEC_CONFIG_AUTO
70324+ help
70325+ If you say Y here, any changes of the system clock will be logged.
70326+ If the sysctl option is enabled, a sysctl option with name
70327+ "timechange_logging" is created.
70328+
70329+config GRKERNSEC_PROC_IPADDR
70330+ bool "/proc/<pid>/ipaddr support"
70331+ default y if GRKERNSEC_CONFIG_AUTO
70332+ help
70333+ If you say Y here, a new entry will be added to each /proc/<pid>
70334+ directory that contains the IP address of the user running the task.
70335+ The IP is carried across local TCP and AF_UNIX stream sockets.
70336+ This information can be useful for IDS/IPSes to perform remote response
70337+ to a local attack. The entry is readable only by the owner of the
70338+ process (and root, if it has CAP_DAC_OVERRIDE, which can be removed via
70339+ the RBAC system), and thus does not create privacy concerns.
70340+
70341+config GRKERNSEC_RWXMAP_LOG
70342+ bool "Denied RWX mmap/mprotect logging"
70343+ default y if GRKERNSEC_CONFIG_AUTO
70344+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
70345+ help
70346+ If you say Y here, calls to mmap() and mprotect() with explicit
70347+ usage of PROT_WRITE and PROT_EXEC together will be logged when
70348+ denied by the PAX_MPROTECT feature. This feature will also
70349+ log other problematic scenarios that can occur when PAX_MPROTECT
70350+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
70351+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
70352+ is created.
70353+
70354+endmenu
70355+
70356+menu "Executable Protections"
70357+depends on GRKERNSEC
70358+
70359+config GRKERNSEC_DMESG
70360+ bool "Dmesg(8) restriction"
70361+ default y if GRKERNSEC_CONFIG_AUTO
70362+ help
70363+ If you say Y here, non-root users will not be able to use dmesg(8)
70364+ to view the contents of the kernel's circular log buffer.
70365+ The kernel's log buffer often contains kernel addresses and other
70366+ identifying information useful to an attacker in fingerprinting a
70367+ system for a targeted exploit.
70368+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
70369+ created.
70370+
70371+config GRKERNSEC_HARDEN_PTRACE
70372+ bool "Deter ptrace-based process snooping"
70373+ default y if GRKERNSEC_CONFIG_AUTO
70374+ help
70375+ If you say Y here, TTY sniffers and other malicious monitoring
70376+ programs implemented through ptrace will be defeated. If you
70377+ have been using the RBAC system, this option has already been
70378+ enabled for several years for all users, with the ability to make
70379+ fine-grained exceptions.
70380+
70381+ This option only affects the ability of non-root users to ptrace
70382+ processes that are not a descendant of the ptracing process.
70383+ This means that strace ./binary and gdb ./binary will still work,
70384+ but attaching to arbitrary processes will not. If the sysctl
70385+ option is enabled, a sysctl option with name "harden_ptrace" is
70386+ created.
70387+
70388+config GRKERNSEC_PTRACE_READEXEC
70389+ bool "Require read access to ptrace sensitive binaries"
70390+ default y if GRKERNSEC_CONFIG_AUTO
70391+ help
70392+ If you say Y here, unprivileged users will not be able to ptrace unreadable
70393+ binaries. This option is useful in environments that
70394+ remove the read bits (e.g. file mode 4711) from suid binaries to
70395+ prevent infoleaking of their contents. This option adds
70396+ consistency to the use of that file mode, as the binary could otherwise
70397+ be read out by ptracing it while it runs without privileges.
70398+
70399+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
70400+ is created.
70401+
70402+config GRKERNSEC_SETXID
70403+ bool "Enforce consistent multithreaded privileges"
70404+ default y if GRKERNSEC_CONFIG_AUTO
70405+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
70406+ help
70407+ If you say Y here, a change from a root uid to a non-root uid
70408+ in a multithreaded application will cause the resulting uids,
70409+ gids, supplementary groups, and capabilities in that thread
70410+ to be propagated to the other threads of the process. In most
70411+ cases this is unnecessary, as glibc will emulate this behavior
70412+ on behalf of the application. Other libcs do not act in the
70413+ same way, allowing the other threads of the process to continue
70414+ running with root privileges. If the sysctl option is enabled,
70415+ a sysctl option with name "consistent_setxid" is created.
70416+
70417+config GRKERNSEC_HARDEN_IPC
70418+ bool "Disallow access to overly-permissive IPC objects"
70419+ default y if GRKERNSEC_CONFIG_AUTO
70420+ depends on SYSVIPC
70421+ help
70422+ If you say Y here, access to overly-permissive IPC objects (shared
70423+ memory, message queues, and semaphores) will be denied for processes
70424+ meeting either of the following criteria, beyond normal permission checks:
70425+ 1) If the IPC object is world-accessible and the euid doesn't match
70426+ that of the creator or current uid for the IPC object
70427+ 2) If the IPC object is group-accessible and the egid doesn't
70428+ match that of the creator or current gid for the IPC object
70429+ It's a common error to grant too much permission to these objects,
70430+ with impact ranging from denial of service and information leaking to
70431+ privilege escalation. This feature was developed in response to
70432+ research by Tim Brown:
70433+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
70434+ who found hundreds of such insecure usages. Processes with
70435+ CAP_IPC_OWNER are still permitted to access these IPC objects.
70436+ If the sysctl option is enabled, a sysctl option with name
70437+ "harden_ipc" is created.
70438+
70439+config GRKERNSEC_TPE
70440+ bool "Trusted Path Execution (TPE)"
70441+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
70442+ help
70443+ If you say Y here, you will be able to choose a gid to add to the
70444+ supplementary groups of users you want to mark as "untrusted."
70445+ These users will not be able to execute any files that are not in
70446+ root-owned directories writable only by root. If the sysctl option
70447+ is enabled, a sysctl option with name "tpe" is created.
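+
+ As an illustrative sketch (the group and user names here are made up,
+ and 1005 is merely the default GID from the option below), an admin
+ could mark a user as TPE-untrusted with:
+   groupadd -g 1005 tpe-untrusted
+   usermod -aG tpe-untrusted someuser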
70448+
70449+config GRKERNSEC_TPE_ALL
70450+ bool "Partially restrict all non-root users"
70451+ depends on GRKERNSEC_TPE
70452+ help
70453+ If you say Y here, all non-root users will be covered under
70454+ a weaker TPE restriction. This is separate from, and in addition to,
70455+ the main TPE options that you have selected elsewhere. Thus, if a
70456+ "trusted" GID is chosen, this restriction applies to even that GID.
70457+ Under this restriction, all non-root users will only be allowed to
70458+ execute files in directories they own that are not group or
70459+ world-writable, or in directories owned by root and writable only by
70460+ root. If the sysctl option is enabled, a sysctl option with name
70461+ "tpe_restrict_all" is created.
70462+
70463+config GRKERNSEC_TPE_INVERT
70464+ bool "Invert GID option"
70465+ depends on GRKERNSEC_TPE
70466+ help
70467+ If you say Y here, the group you specify in the TPE configuration will
70468+ decide what group TPE restrictions will be *disabled* for. This
70469+ option is useful if you want TPE restrictions to be applied to most
70470+ users on the system. If the sysctl option is enabled, a sysctl option
70471+ with name "tpe_invert" is created. Unlike other sysctl options, this
70472+ entry will default to on for backward-compatibility.
70473+
70474+config GRKERNSEC_TPE_GID
70475+ int
70476+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
70477+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
70478+
70479+config GRKERNSEC_TPE_UNTRUSTED_GID
70480+ int "GID for TPE-untrusted users"
70481+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
70482+ default 1005
70483+ help
70484+ Setting this GID determines what group TPE restrictions will be
70485+ *enabled* for. If the sysctl option is enabled, a sysctl option
70486+ with name "tpe_gid" is created.
70487+
70488+config GRKERNSEC_TPE_TRUSTED_GID
70489+ int "GID for TPE-trusted users"
70490+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
70491+ default 1005
70492+ help
70493+ Setting this GID determines what group TPE restrictions will be
70494+ *disabled* for. If the sysctl option is enabled, a sysctl option
70495+ with name "tpe_gid" is created.
70496+
70497+endmenu
70498+menu "Network Protections"
70499+depends on GRKERNSEC
70500+
70501+config GRKERNSEC_BLACKHOLE
70502+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
70503+ default y if GRKERNSEC_CONFIG_AUTO
70504+ depends on NET
70505+ help
70506+ If you say Y here, neither TCP resets nor ICMP
70507+ destination-unreachable packets will be sent in response to packets
70508+ sent to ports for which no associated listening process exists.
70509+ It will also prevent the sending of ICMP protocol unreachable packets
70510+ in response to packets with unknown protocols.
70511+ This feature supports both IPv4 and IPv6 and exempts the
70512+ loopback interface from blackholing. Enabling this feature
70513+ makes a host more resilient to DoS attacks and reduces network
70514+ visibility against scanners.
70515+
70516+ The blackhole feature as-implemented is equivalent to the FreeBSD
70517+ blackhole feature, as it prevents RST responses to all packets, not
70518+ just SYNs. Under most application behavior this causes no
70519+ problems, but applications (like haproxy) may not close certain
70520+ connections in a way that cleanly terminates them on the remote
70521+ end, leaving the remote host in LAST_ACK state. Because of this
70522+ side-effect and to prevent intentional LAST_ACK DoSes, this
70523+ feature also adds automatic mitigation against such attacks.
70524+ The mitigation drastically reduces the amount of time a socket
70525+ can spend in LAST_ACK state. If you're using haproxy and not
70526+ all servers it connects to have this option enabled, consider
70527+ disabling this feature on the haproxy host.
70528+
70529+ If the sysctl option is enabled, two sysctl options with names
70530+ "ip_blackhole" and "lastack_retries" will be created.
70531+ While "ip_blackhole" takes the standard zero/non-zero on/off
70532+ toggle, "lastack_retries" uses the same kinds of values as
70533+ "tcp_retries1" and "tcp_retries2". The default value of 4
70534+ prevents a socket from lasting more than 45 seconds in LAST_ACK
70535+ state.
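+
+ As an illustrative example (assuming sysctl support is enabled and the
+ entries appear under /proc/sys/kernel/grsecurity as described in the
+ "Sysctl Support" menu), the feature could be tuned at runtime with:
+   echo 1 > /proc/sys/kernel/grsecurity/ip_blackhole
+   echo 4 > /proc/sys/kernel/grsecurity/lastack_retries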
70536+
70537+config GRKERNSEC_NO_SIMULT_CONNECT
70538+ bool "Disable TCP Simultaneous Connect"
70539+ default y if GRKERNSEC_CONFIG_AUTO
70540+ depends on NET
70541+ help
70542+ If you say Y here, a feature by Willy Tarreau will be enabled that
70543+ removes a weakness in Linux's strict implementation of TCP that
70544+ allows two clients to connect to each other without either entering
70545+ a listening state. The weakness allows an attacker to easily prevent
70546+ a client from connecting to a known server provided the source port
70547+ for the connection is guessed correctly.
70548+
70549+ As the weakness could be used to prevent an antivirus or IPS from
70550+ fetching updates, or prevent an SSL gateway from fetching a CRL,
70551+ it should be eliminated by enabling this option. Though Linux is
70552+ one of the few operating systems supporting simultaneous connect, it
70553+ has no legitimate use in practice and is rarely supported by firewalls.
70554+
70555+config GRKERNSEC_SOCKET
70556+ bool "Socket restrictions"
70557+ depends on NET
70558+ help
70559+ If you say Y here, you will be able to choose from several options.
70560+ If you assign a GID on your system and add it to the supplementary
70561+ groups of users you want to restrict socket access to, this feature
70562+ will enforce up to three restrictions, based on the option(s) you choose.
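+
+ For example (names illustrative; 1004 is only the default GID from the
+ "Deny any sockets to group" option below), restricted users might be
+ set up with:
+   groupadd -g 1004 no-sockets
+   usermod -aG no-sockets untrusteduser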
70563+
70564+config GRKERNSEC_SOCKET_ALL
70565+ bool "Deny any sockets to group"
70566+ depends on GRKERNSEC_SOCKET
70567+ help
70568+ If you say Y here, you will be able to choose a GID whose users will
70569+ be unable to connect to other hosts from your machine or run server
70570+ applications from your machine. If the sysctl option is enabled, a
70571+ sysctl option with name "socket_all" is created.
70572+
70573+config GRKERNSEC_SOCKET_ALL_GID
70574+ int "GID to deny all sockets for"
70575+ depends on GRKERNSEC_SOCKET_ALL
70576+ default 1004
70577+ help
70578+ Here you can choose the GID to disable socket access for. Remember to
70579+ add the users you want socket access disabled for to the GID
70580+ specified here. If the sysctl option is enabled, a sysctl option
70581+ with name "socket_all_gid" is created.
70582+
70583+config GRKERNSEC_SOCKET_CLIENT
70584+ bool "Deny client sockets to group"
70585+ depends on GRKERNSEC_SOCKET
70586+ help
70587+ If you say Y here, you will be able to choose a GID whose users will
70588+ be unable to connect to other hosts from your machine, but will be
70589+ able to run servers. If this option is enabled, all users in the group
70590+ you specify will have to use passive mode when initiating ftp transfers
70591+ from the shell on your machine. If the sysctl option is enabled, a
70592+ sysctl option with name "socket_client" is created.
70593+
70594+config GRKERNSEC_SOCKET_CLIENT_GID
70595+ int "GID to deny client sockets for"
70596+ depends on GRKERNSEC_SOCKET_CLIENT
70597+ default 1003
70598+ help
70599+ Here you can choose the GID to disable client socket access for.
70600+ Remember to add the users you want client socket access disabled for to
70601+ the GID specified here. If the sysctl option is enabled, a sysctl
70602+ option with name "socket_client_gid" is created.
70603+
70604+config GRKERNSEC_SOCKET_SERVER
70605+ bool "Deny server sockets to group"
70606+ depends on GRKERNSEC_SOCKET
70607+ help
70608+ If you say Y here, you will be able to choose a GID whose users will
70609+ be unable to run server applications from your machine. If the sysctl
70610+ option is enabled, a sysctl option with name "socket_server" is created.
70611+
70612+config GRKERNSEC_SOCKET_SERVER_GID
70613+ int "GID to deny server sockets for"
70614+ depends on GRKERNSEC_SOCKET_SERVER
70615+ default 1002
70616+ help
70617+ Here you can choose the GID to disable server socket access for.
70618+ Remember to add the users you want server socket access disabled for to
70619+ the GID specified here. If the sysctl option is enabled, a sysctl
70620+ option with name "socket_server_gid" is created.
70621+
70622+endmenu
70623+
70624+menu "Physical Protections"
70625+depends on GRKERNSEC
70626+
70627+config GRKERNSEC_DENYUSB
70628+ bool "Deny new USB connections after toggle"
70629+ default y if GRKERNSEC_CONFIG_AUTO
70630+ depends on SYSCTL && USB_SUPPORT
70631+ help
70632+ If you say Y here, a new sysctl option with name "deny_new_usb"
70633+ will be created. Setting its value to 1 will prevent any new
70634+ USB devices from being recognized by the OS. Any attempted USB
70635+ device insertion will be logged. This option is intended to be
70636+ used against custom USB devices designed to exploit vulnerabilities
70637+ in various USB device drivers.
70638+
70639+ For greatest effectiveness, this sysctl should be set after any
70640+ relevant init scripts. This option is safe to enable in distros
70641+ as each user can choose whether or not to toggle the sysctl.
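+
+ For example, a late init script might toggle it once boot-time devices
+ have been set up:
+   echo 1 > /proc/sys/kernel/grsecurity/deny_new_usb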
70642+
70643+config GRKERNSEC_DENYUSB_FORCE
70644+ bool "Reject all USB devices not connected at boot"
70645+ select USB
70646+ depends on GRKERNSEC_DENYUSB
70647+ help
70648+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
70649+ that doesn't involve a sysctl entry. This option should only be
70650+ enabled if you're sure you want to deny all new USB connections
70651+ at runtime and don't want to modify init scripts. This should not
70652+ be enabled by distros. It forces the core USB code to be built
70653+ into the kernel image so that all devices connected at boot time
70654+ can be recognized and new USB device connections can be prevented
70655+ prior to init running.
70656+
70657+endmenu
70658+
70659+menu "Sysctl Support"
70660+depends on GRKERNSEC && SYSCTL
70661+
70662+config GRKERNSEC_SYSCTL
70663+ bool "Sysctl support"
70664+ default y if GRKERNSEC_CONFIG_AUTO
70665+ help
70666+ If you say Y here, you will be able to change the options that
70667+ grsecurity runs with at bootup, without having to recompile your
70668+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
70669+ to enable (1) or disable (0) various features. All the sysctl entries
70670+ are mutable until the "grsec_lock" entry is set to a non-zero value.
70671+ All features enabled in the kernel configuration are disabled at boot
70672+ if you do not say Y to the "Turn on features by default" option.
70673+ All options should be set at startup, and the grsec_lock entry should
70674+ be set to a non-zero value after all the options are set.
70675+ *THIS IS EXTREMELY IMPORTANT*
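+
+ As an illustrative sketch (the entry names are examples drawn from the
+ options in this configuration), an init script might enable features
+ and then lock the configuration:
+   echo 1 > /proc/sys/kernel/grsecurity/chroot_caps
+   echo 1 > /proc/sys/kernel/grsecurity/grsec_lock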
70676+
70677+config GRKERNSEC_SYSCTL_DISTRO
70678+ bool "Extra sysctl support for distro makers (READ HELP)"
70679+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
70680+ help
70681+ If you say Y here, additional sysctl options will be created
70682+ for features that affect processes running as root. Therefore,
70683+ it is critical when using this option that the grsec_lock entry be
70684+ enabled after boot. Only distros that ship prebuilt kernel packages
70685+ with this option enabled and that can ensure grsec_lock is set
70686+ after boot should use this option.
70687+ *Failure to set grsec_lock after boot makes all grsec features
70688+ this option covers useless*
70689+
70690+ Currently this option creates the following sysctl entries:
70691+ "Disable Privileged I/O": "disable_priv_io"
70692+
70693+config GRKERNSEC_SYSCTL_ON
70694+ bool "Turn on features by default"
70695+ default y if GRKERNSEC_CONFIG_AUTO
70696+ depends on GRKERNSEC_SYSCTL
70697+ help
70698+ If you say Y here, the features enabled in the kernel configuration
70699+ will be enabled at boot time, rather than starting out disabled.
70700+ It is recommended you say Y here unless
70701+ there is some reason you would want all sysctl-tunable features to
70702+ be disabled by default. As mentioned elsewhere, it is important
70703+ to enable the grsec_lock entry once you have finished modifying
70704+ the sysctl entries.
70705+
70706+endmenu
70707+menu "Logging Options"
70708+depends on GRKERNSEC
70709+
70710+config GRKERNSEC_FLOODTIME
70711+ int "Seconds in between log messages (minimum)"
70712+ default 10
70713+ help
70714+ This option allows you to enforce the number of seconds between
70715+ grsecurity log messages. The default should be suitable for most
70716+ people; however, if you choose to change it, choose a value small enough
70717+ to allow informative logs to be produced, but large enough to
70718+ prevent flooding.
70719+
70720+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
70721+ any rate limiting on grsecurity log messages.
70722+
70723+config GRKERNSEC_FLOODBURST
70724+ int "Number of messages in a burst (maximum)"
70725+ default 6
70726+ help
70727+ This option allows you to choose the maximum number of messages allowed
70728+ within the flood time interval you chose in a separate option. The
70729+ default should be suitable for most people; however, if you find that
70730+ many of your logs are being interpreted as flooding, you may want to
70731+ raise this value.
70732+
70733+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
70734+ any rate limiting on grsecurity log messages.
70735+
70736+endmenu
70737diff --git a/grsecurity/Makefile b/grsecurity/Makefile
70738new file mode 100644
70739index 0000000..30ababb
70740--- /dev/null
70741+++ b/grsecurity/Makefile
70742@@ -0,0 +1,54 @@
70743+# grsecurity - access control and security hardening for Linux
70744+# All code in this directory and various hooks located throughout the Linux kernel are
70745+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
70746+# http://www.grsecurity.net spender@grsecurity.net
70747+#
70748+# This program is free software; you can redistribute it and/or
70749+# modify it under the terms of the GNU General Public License version 2
70750+# as published by the Free Software Foundation.
70751+#
70752+# This program is distributed in the hope that it will be useful,
70753+# but WITHOUT ANY WARRANTY; without even the implied warranty of
70754+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70755+# GNU General Public License for more details.
70756+#
70757+# You should have received a copy of the GNU General Public License
70758+# along with this program; if not, write to the Free Software
70759+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
70760+
70761+KBUILD_CFLAGS += -Werror
70762+
70763+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
70764+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
70765+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
70766+ grsec_usb.o grsec_ipc.o grsec_proc.o
70767+
70768+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
70769+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
70770+ gracl_learn.o grsec_log.o gracl_policy.o
70771+ifdef CONFIG_COMPAT
70772+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
70773+endif
70774+
70775+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
70776+
70777+ifdef CONFIG_NET
70778+obj-y += grsec_sock.o
70779+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
70780+endif
70781+
70782+ifndef CONFIG_GRKERNSEC
70783+obj-y += grsec_disabled.o
70784+endif
70785+
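+# When HIDESYM is enabled, tighten permissions on the paths that expose the
+# kernel image, modules, and build tree (and thus symbol information).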
70786+ifdef CONFIG_GRKERNSEC_HIDESYM
70787+extra-y := grsec_hidesym.o
70788+$(obj)/grsec_hidesym.o:
70789+ @-chmod -f 500 /boot
70790+ @-chmod -f 500 /lib/modules
70791+ @-chmod -f 500 /lib64/modules
70792+ @-chmod -f 500 /lib32/modules
70793+ @-chmod -f 700 .
70794+ @-chmod -f 700 $(objtree)
70795+ @echo ' grsec: protected kernel image paths'
70796+endif
70797diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
70798new file mode 100644
70799index 0000000..811af1f
70800--- /dev/null
70801+++ b/grsecurity/gracl.c
70802@@ -0,0 +1,2749 @@
70803+#include <linux/kernel.h>
70804+#include <linux/module.h>
70805+#include <linux/sched.h>
70806+#include <linux/mm.h>
70807+#include <linux/file.h>
70808+#include <linux/fs.h>
70809+#include <linux/namei.h>
70810+#include <linux/mount.h>
70811+#include <linux/tty.h>
70812+#include <linux/proc_fs.h>
70813+#include <linux/lglock.h>
70814+#include <linux/slab.h>
70815+#include <linux/vmalloc.h>
70816+#include <linux/types.h>
70817+#include <linux/sysctl.h>
70818+#include <linux/netdevice.h>
70819+#include <linux/ptrace.h>
70820+#include <linux/gracl.h>
70821+#include <linux/gralloc.h>
70822+#include <linux/security.h>
70823+#include <linux/grinternal.h>
70824+#include <linux/pid_namespace.h>
70825+#include <linux/stop_machine.h>
70826+#include <linux/fdtable.h>
70827+#include <linux/percpu.h>
70829+#include <linux/hugetlb.h>
70830+#include <linux/posix-timers.h>
70831+#include <linux/prefetch.h>
70832+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70833+#include <linux/magic.h>
70834+#include <linux/pagemap.h>
70835+#include "../fs/btrfs/async-thread.h"
70836+#include "../fs/btrfs/ctree.h"
70837+#include "../fs/btrfs/btrfs_inode.h"
70838+#endif
70839+#include "../fs/mount.h"
70840+
70841+#include <asm/uaccess.h>
70842+#include <asm/errno.h>
70843+#include <asm/mman.h>
70844+
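+/* iterate over every role in the running policy; the role list is chained
+   through ->prev, so traversal runs from the most recently added role back */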
70845+#define FOR_EACH_ROLE_START(role) \
70846+ role = running_polstate.role_list; \
70847+ while (role) {
70848+
70849+#define FOR_EACH_ROLE_END(role) \
70850+ role = role->prev; \
70851+ }
70852+
70853+extern struct path gr_real_root;
70854+
70855+static struct gr_policy_state running_polstate;
70856+struct gr_policy_state *polstate = &running_polstate;
70857+extern struct gr_alloc_state *current_alloc_state;
70858+
70859+extern char *gr_shared_page[4];
70860+DEFINE_RWLOCK(gr_inode_lock);
70861+
70862+static unsigned int gr_status __read_only = GR_STATUS_INIT;
70863+
70864+#ifdef CONFIG_NET
70865+extern struct vfsmount *sock_mnt;
70866+#endif
70867+
70868+extern struct vfsmount *pipe_mnt;
70869+extern struct vfsmount *shm_mnt;
70870+
70871+#ifdef CONFIG_HUGETLBFS
70872+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
70873+#endif
70874+
70875+extern u16 acl_sp_role_value;
70876+extern struct acl_object_label *fakefs_obj_rw;
70877+extern struct acl_object_label *fakefs_obj_rwx;
70878+
70879+int gr_acl_is_enabled(void)
70880+{
70881+ return (gr_status & GR_READY);
70882+}
70883+
70884+void gr_enable_rbac_system(void)
70885+{
70886+ pax_open_kernel();
70887+ gr_status |= GR_READY;
70888+ pax_close_kernel();
70889+}
70890+
70891+int gr_rbac_disable(void *unused)
70892+{
70893+ pax_open_kernel();
70894+ gr_status &= ~GR_READY;
70895+ pax_close_kernel();
70896+
70897+ return 0;
70898+}
70899+
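+/* btrfs subvolumes share one superblock, so on btrfs use the subvolume's
+   anonymous device and btrfs-specific inode number to keep the (ino, dev)
+   pairs used by the RBAC hash tables unique */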
70900+static inline dev_t __get_dev(const struct dentry *dentry)
70901+{
70902+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70903+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
70904+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
70905+ else
70906+#endif
70907+ return dentry->d_sb->s_dev;
70908+}
70909+
70910+static inline u64 __get_ino(const struct dentry *dentry)
70911+{
70912+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70913+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
70914+ return btrfs_ino(dentry->d_inode);
70915+ else
70916+#endif
70917+ return dentry->d_inode->i_ino;
70918+}
70919+
70920+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
70921+{
70922+ return __get_dev(dentry);
70923+}
70924+
70925+u64 gr_get_ino_from_dentry(struct dentry *dentry)
70926+{
70927+ return __get_ino(dentry);
70928+}
70929+
70930+static char gr_task_roletype_to_char(struct task_struct *task)
70931+{
70932+ switch (task->role->roletype &
70933+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
70934+ GR_ROLE_SPECIAL)) {
70935+ case GR_ROLE_DEFAULT:
70936+ return 'D';
70937+ case GR_ROLE_USER:
70938+ return 'U';
70939+ case GR_ROLE_GROUP:
70940+ return 'G';
70941+ case GR_ROLE_SPECIAL:
70942+ return 'S';
70943+ }
70944+
70945+ return 'X';
70946+}
70947+
70948+char gr_roletype_to_char(void)
70949+{
70950+ return gr_task_roletype_to_char(current);
70951+}
70952+
70953+int
70954+gr_acl_tpe_check(void)
70955+{
70956+ if (unlikely(!(gr_status & GR_READY)))
70957+ return 0;
70958+ if (current->role->roletype & GR_ROLE_TPE)
70959+ return 1;
70960+ else
70961+ return 0;
70962+}
70963+
70964+int
70965+gr_handle_rawio(const struct inode *inode)
70966+{
70967+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70968+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
70969+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
70970+ !capable(CAP_SYS_RAWIO))
70971+ return 1;
70972+#endif
70973+ return 0;
70974+}
70975+
70976+int
70977+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
70978+{
70979+ if (likely(lena != lenb))
70980+ return 0;
70981+
70982+ return !memcmp(a, b, lena);
70983+}
70984+
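+/* copy namelen bytes of str immediately before *buffer, moving the buffer
+   pointer and remaining length back; fails if the buffer would underflow */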
70985+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
70986+{
70987+ *buflen -= namelen;
70988+ if (*buflen < 0)
70989+ return -ENAMETOOLONG;
70990+ *buffer -= namelen;
70991+ memcpy(*buffer, str, namelen);
70992+ return 0;
70993+}
70994+
70995+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
70996+{
70997+ return prepend(buffer, buflen, name->name, name->len);
70998+}
70999+
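+/* build the path by walking from the dentry up to the given root, prepending
+   each component; mountpoints are crossed via the parent mount (cf. the
+   similar logic in fs/dcache.c) */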
71000+static int prepend_path(const struct path *path, struct path *root,
71001+ char **buffer, int *buflen)
71002+{
71003+ struct dentry *dentry = path->dentry;
71004+ struct vfsmount *vfsmnt = path->mnt;
71005+ struct mount *mnt = real_mount(vfsmnt);
71006+ bool slash = false;
71007+ int error = 0;
71008+
71009+ while (dentry != root->dentry || vfsmnt != root->mnt) {
71010+ struct dentry * parent;
71011+
71012+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
71013+ /* Global root? */
71014+ if (!mnt_has_parent(mnt)) {
71015+ goto out;
71016+ }
71017+ dentry = mnt->mnt_mountpoint;
71018+ mnt = mnt->mnt_parent;
71019+ vfsmnt = &mnt->mnt;
71020+ continue;
71021+ }
71022+ parent = dentry->d_parent;
71023+ prefetch(parent);
71024+ spin_lock(&dentry->d_lock);
71025+ error = prepend_name(buffer, buflen, &dentry->d_name);
71026+ spin_unlock(&dentry->d_lock);
71027+ if (!error)
71028+ error = prepend(buffer, buflen, "/", 1);
71029+ if (error)
71030+ break;
71031+
71032+ slash = true;
71033+ dentry = parent;
71034+ }
71035+
71036+out:
71037+ if (!error && !slash)
71038+ error = prepend(buffer, buflen, "/", 1);
71039+
71040+ return error;
71041+}
71042+
71043+/* this must be called with mount_lock and rename_lock held */
71044+
71045+static char *__our_d_path(const struct path *path, struct path *root,
71046+ char *buf, int buflen)
71047+{
71048+ char *res = buf + buflen;
71049+ int error;
71050+
71051+ prepend(&res, &buflen, "\0", 1);
71052+ error = prepend_path(path, root, &res, &buflen);
71053+ if (error)
71054+ return ERR_PTR(error);
71055+
71056+ return res;
71057+}
71058+
71059+static char *
71060+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
71061+{
71062+ char *retval;
71063+
71064+ retval = __our_d_path(path, root, buf, buflen);
71065+ if (unlikely(IS_ERR(retval)))
71066+ retval = strcpy(buf, "<path too long>");
71067+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
71068+ retval[1] = '\0';
71069+
71070+ return retval;
71071+}
71072+
71073+static char *
71074+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
71075+ char *buf, int buflen)
71076+{
71077+ struct path path;
71078+ char *res;
71079+
71080+ path.dentry = (struct dentry *)dentry;
71081+ path.mnt = (struct vfsmount *)vfsmnt;
71082+
71083+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
71084+ by the RBAC system */
71085+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
71086+
71087+ return res;
71088+}
71089+
71090+static char *
71091+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
71092+ char *buf, int buflen)
71093+{
71094+ char *res;
71095+ struct path path;
71096+ struct path root;
71097+ struct task_struct *reaper = init_pid_ns.child_reaper;
71098+
71099+ path.dentry = (struct dentry *)dentry;
71100+ path.mnt = (struct vfsmount *)vfsmnt;
71101+
71102+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
71103+ get_fs_root(reaper->fs, &root);
71104+
71105+ read_seqlock_excl(&mount_lock);
71106+ write_seqlock(&rename_lock);
71107+ res = gen_full_path(&path, &root, buf, buflen);
71108+ write_sequnlock(&rename_lock);
71109+ read_sequnlock_excl(&mount_lock);
71110+
71111+ path_put(&root);
71112+ return res;
71113+}
71114+
71115+char *
71116+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
71117+{
71118+ char *ret;
71119+ read_seqlock_excl(&mount_lock);
71120+ write_seqlock(&rename_lock);
71121+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
71122+ PAGE_SIZE);
71123+ write_sequnlock(&rename_lock);
71124+ read_sequnlock_excl(&mount_lock);
71125+ return ret;
71126+}
71127+
71128+static char *
71129+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
71130+{
71131+ char *ret;
71132+ char *buf;
71133+ int buflen;
71134+
71135+ read_seqlock_excl(&mount_lock);
71136+ write_seqlock(&rename_lock);
71137+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
71138+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
71139+ buflen = (int)(ret - buf);
71140+ if (buflen >= 5)
71141+ prepend(&ret, &buflen, "/proc", 5);
71142+ else
71143+ ret = strcpy(buf, "<path too long>");
71144+ write_sequnlock(&rename_lock);
71145+ read_sequnlock_excl(&mount_lock);
71146+ return ret;
71147+}
71148+
71149+char *
71150+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
71151+{
71152+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
71153+ PAGE_SIZE);
71154+}
71155+
71156+char *
71157+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
71158+{
71159+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
71160+ PAGE_SIZE);
71161+}
71162+
71163+char *
71164+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
71165+{
71166+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
71167+ PAGE_SIZE);
71168+}
71169+
71170+char *
71171+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
71172+{
71173+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
71174+ PAGE_SIZE);
71175+}
71176+
71177+char *
71178+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
71179+{
71180+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
71181+ PAGE_SIZE);
71182+}
71183+
71184+__u32
71185+to_gr_audit(const __u32 reqmode)
71186+{
71187+ /* masks off auditable permission flags, then shifts them to create
71188+ auditing flags, and adds the special case of append auditing if
71189+ we're requesting write */
71190+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
71191+}
71192+
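+/* resolve a task's role: try user/domain roles first, then group roles, then
+   fall back to the default role; a role carrying an allowed_ips list only
+   matches if the task's saved IP falls within one of its netmasks */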
71193+struct acl_role_label *
71194+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
71195+ const gid_t gid)
71196+{
71197+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
71198+ struct acl_role_label *match;
71199+ struct role_allowed_ip *ipp;
71200+ unsigned int x;
71201+ u32 curr_ip = task->signal->saved_ip;
71202+
71203+ match = state->acl_role_set.r_hash[index];
71204+
71205+ while (match) {
71206+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
71207+ for (x = 0; x < match->domain_child_num; x++) {
71208+ if (match->domain_children[x] == uid)
71209+ goto found;
71210+ }
71211+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
71212+ break;
71213+ match = match->next;
71214+ }
71215+found:
71216+ if (match == NULL) {
71217+ try_group:
71218+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
71219+ match = state->acl_role_set.r_hash[index];
71220+
71221+ while (match) {
71222+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
71223+ for (x = 0; x < match->domain_child_num; x++) {
71224+ if (match->domain_children[x] == gid)
71225+ goto found2;
71226+ }
71227+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
71228+ break;
71229+ match = match->next;
71230+ }
71231+found2:
71232+ if (match == NULL)
71233+ match = state->default_role;
71234+ if (match->allowed_ips == NULL)
71235+ return match;
71236+ else {
71237+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71238+ if (likely
71239+ ((ntohl(curr_ip) & ipp->netmask) ==
71240+ (ntohl(ipp->addr) & ipp->netmask)))
71241+ return match;
71242+ }
71243+ match = state->default_role;
71244+ }
71245+ } else if (match->allowed_ips == NULL) {
71246+ return match;
71247+ } else {
71248+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
71249+ if (likely
71250+ ((ntohl(curr_ip) & ipp->netmask) ==
71251+ (ntohl(ipp->addr) & ipp->netmask)))
71252+ return match;
71253+ }
71254+ goto try_group;
71255+ }
71256+
71257+ return match;
71258+}
71259+
71260+static struct acl_role_label *
71261+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
71262+ const gid_t gid)
71263+{
71264+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
71265+}
71266+
71267+struct acl_subject_label *
71268+lookup_acl_subj_label(const u64 ino, const dev_t dev,
71269+ const struct acl_role_label *role)
71270+{
71271+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71272+ struct acl_subject_label *match;
71273+
71274+ match = role->subj_hash[index];
71275+
71276+ while (match && (match->inode != ino || match->device != dev ||
71277+ (match->mode & GR_DELETED))) {
71278+ match = match->next;
71279+ }
71280+
71281+ if (match && !(match->mode & GR_DELETED))
71282+ return match;
71283+ else
71284+ return NULL;
71285+}
71286+
71287+struct acl_subject_label *
71288+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
71289+ const struct acl_role_label *role)
71290+{
71291+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
71292+ struct acl_subject_label *match;
71293+
71294+ match = role->subj_hash[index];
71295+
71296+ while (match && (match->inode != ino || match->device != dev ||
71297+ !(match->mode & GR_DELETED))) {
71298+ match = match->next;
71299+ }
71300+
71301+ if (match && (match->mode & GR_DELETED))
71302+ return match;
71303+ else
71304+ return NULL;
71305+}
71306+
71307+static struct acl_object_label *
71308+lookup_acl_obj_label(const u64 ino, const dev_t dev,
71309+ const struct acl_subject_label *subj)
71310+{
71311+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71312+ struct acl_object_label *match;
71313+
71314+ match = subj->obj_hash[index];
71315+
71316+ while (match && (match->inode != ino || match->device != dev ||
71317+ (match->mode & GR_DELETED))) {
71318+ match = match->next;
71319+ }
71320+
71321+ if (match && !(match->mode & GR_DELETED))
71322+ return match;
71323+ else
71324+ return NULL;
71325+}
71326+
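+/* variant used for create-time checks: prefer an object marked GR_DELETED
+   (a rule for a previously deleted path), then fall back to a live match */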
71327+static struct acl_object_label *
71328+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
71329+ const struct acl_subject_label *subj)
71330+{
71331+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
71332+ struct acl_object_label *match;
71333+
71334+ match = subj->obj_hash[index];
71335+
71336+ while (match && (match->inode != ino || match->device != dev ||
71337+ !(match->mode & GR_DELETED))) {
71338+ match = match->next;
71339+ }
71340+
71341+ if (match && (match->mode & GR_DELETED))
71342+ return match;
71343+
71344+ match = subj->obj_hash[index];
71345+
71346+ while (match && (match->inode != ino || match->device != dev ||
71347+ (match->mode & GR_DELETED))) {
71348+ match = match->next;
71349+ }
71350+
71351+ if (match && !(match->mode & GR_DELETED))
71352+ return match;
71353+ else
71354+ return NULL;
71355+}
71356+
71357+struct name_entry *
71358+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
71359+{
71360+ unsigned int len = strlen(name);
71361+ unsigned int key = full_name_hash(name, len);
71362+ unsigned int index = key % state->name_set.n_size;
71363+ struct name_entry *match;
71364+
71365+ match = state->name_set.n_hash[index];
71366+
71367+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
71368+ match = match->next;
71369+
71370+ return match;
71371+}
71372+
71373+static struct name_entry *
71374+lookup_name_entry(const char *name)
71375+{
71376+ return __lookup_name_entry(&running_polstate, name);
71377+}
71378+
71379+static struct name_entry *
71380+lookup_name_entry_create(const char *name)
71381+{
71382+ unsigned int len = strlen(name);
71383+ unsigned int key = full_name_hash(name, len);
71384+ unsigned int index = key % running_polstate.name_set.n_size;
71385+ struct name_entry *match;
71386+
71387+ match = running_polstate.name_set.n_hash[index];
71388+
71389+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71390+ !match->deleted))
71391+ match = match->next;
71392+
71393+ if (match && match->deleted)
71394+ return match;
71395+
71396+ match = running_polstate.name_set.n_hash[index];
71397+
71398+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
71399+ match->deleted))
71400+ match = match->next;
71401+
71402+ if (match && !match->deleted)
71403+ return match;
71404+ else
71405+ return NULL;
71406+}
71407+
71408+static struct inodev_entry *
71409+lookup_inodev_entry(const u64 ino, const dev_t dev)
71410+{
71411+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
71412+ struct inodev_entry *match;
71413+
71414+ match = running_polstate.inodev_set.i_hash[index];
71415+
71416+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
71417+ match = match->next;
71418+
71419+ return match;
71420+}
71421+
71422+void
71423+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
71424+{
71425+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
71426+ state->inodev_set.i_size);
71427+ struct inodev_entry **curr;
71428+
71429+ entry->prev = NULL;
71430+
71431+ curr = &state->inodev_set.i_hash[index];
71432+ if (*curr != NULL)
71433+ (*curr)->prev = entry;
71434+
71435+ entry->next = *curr;
71436+ *curr = entry;
71437+
71438+ return;
71439+}
71440+
71441+static void
71442+insert_inodev_entry(struct inodev_entry *entry)
71443+{
71444+ __insert_inodev_entry(&running_polstate, entry);
71445+}
71446+
71447+void
71448+insert_acl_obj_label(struct acl_object_label *obj,
71449+ struct acl_subject_label *subj)
71450+{
71451+ unsigned int index =
71452+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
71453+ struct acl_object_label **curr;
71454+
71455+ obj->prev = NULL;
71456+
71457+ curr = &subj->obj_hash[index];
71458+ if (*curr != NULL)
71459+ (*curr)->prev = obj;
71460+
71461+ obj->next = *curr;
71462+ *curr = obj;
71463+
71464+ return;
71465+}
71466+
71467+void
71468+insert_acl_subj_label(struct acl_subject_label *obj,
71469+ struct acl_role_label *role)
71470+{
71471+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
71472+ struct acl_subject_label **curr;
71473+
71474+ obj->prev = NULL;
71475+
71476+ curr = &role->subj_hash[index];
71477+ if (*curr != NULL)
71478+ (*curr)->prev = obj;
71479+
71480+ obj->next = *curr;
71481+ *curr = obj;
71482+
71483+ return;
71484+}
71485+
71486+/* derived from glibc fnmatch(); 0 = match, 1 = no match */
71487+
71488+static int
71489+glob_match(const char *p, const char *n)
71490+{
71491+ char c;
71492+
71493+ while ((c = *p++) != '\0') {
71494+ switch (c) {
71495+ case '?':
71496+ if (*n == '\0')
71497+ return 1;
71498+ else if (*n == '/')
71499+ return 1;
71500+ break;
71501+ case '\\':
71502+ if (*n != c)
71503+ return 1;
71504+ break;
71505+ case '*':
71506+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
71507+ if (*n == '/')
71508+ return 1;
71509+ else if (c == '?') {
71510+ if (*n == '\0')
71511+ return 1;
71512+ else
71513+ ++n;
71514+ }
71515+ }
71516+ if (c == '\0') {
71517+ return 0;
71518+ } else {
71519+ const char *endp;
71520+
71521+ if ((endp = strchr(n, '/')) == NULL)
71522+ endp = n + strlen(n);
71523+
71524+ if (c == '[') {
71525+ for (--p; n < endp; ++n)
71526+ if (!glob_match(p, n))
71527+ return 0;
71528+ } else if (c == '/') {
71529+ while (*n != '\0' && *n != '/')
71530+ ++n;
71531+ if (*n == '/' && !glob_match(p, n + 1))
71532+ return 0;
71533+ } else {
71534+ for (--p; n < endp; ++n)
71535+ if (*n == c && !glob_match(p, n))
71536+ return 0;
71537+ }
71538+
71539+ return 1;
71540+ }
71541+ case '[':
71542+ {
71543+ int not;
71544+ char cold;
71545+
71546+ if (*n == '\0' || *n == '/')
71547+ return 1;
71548+
71549+ not = (*p == '!' || *p == '^');
71550+ if (not)
71551+ ++p;
71552+
71553+ c = *p++;
71554+ for (;;) {
71555+ unsigned char fn = (unsigned char)*n;
71556+
71557+ if (c == '\0')
71558+ return 1;
71559+ else {
71560+ if (c == fn)
71561+ goto matched;
71562+ cold = c;
71563+ c = *p++;
71564+
71565+ if (c == '-' && *p != ']') {
71566+ unsigned char cend = *p++;
71567+
71568+ if (cend == '\0')
71569+ return 1;
71570+
71571+ if (cold <= fn && fn <= cend)
71572+ goto matched;
71573+
71574+ c = *p++;
71575+ }
71576+ }
71577+
71578+ if (c == ']')
71579+ break;
71580+ }
71581+ if (!not)
71582+ return 1;
71583+ break;
71584+ matched:
71585+ while (c != ']') {
71586+ if (c == '\0')
71587+ return 1;
71588+
71589+ c = *p++;
71590+ }
71591+ if (not)
71592+ return 1;
71593+ }
71594+ break;
71595+ default:
71596+ if (c != *n)
71597+ return 1;
71598+ }
71599+
71600+ ++n;
71601+ }
71602+
71603+ if (*n == '\0')
71604+ return 0;
71605+
71606+ if (*n == '/')
71607+ return 0;
71608+
71609+ return 1;
71610+}
71611+
71612+static struct acl_object_label *
71613+chk_glob_label(struct acl_object_label *globbed,
71614+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
71615+{
71616+ struct acl_object_label *tmp;
71617+
71618+ if (*path == NULL)
71619+ *path = gr_to_filename_nolock(dentry, mnt);
71620+
71621+ tmp = globbed;
71622+
71623+ while (tmp) {
71624+ if (!glob_match(tmp->filename, *path))
71625+ return tmp;
71626+ tmp = tmp->next;
71627+ }
71628+
71629+ return NULL;
71630+}
71631+
71632+static struct acl_object_label *
71633+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71634+ const u64 curr_ino, const dev_t curr_dev,
71635+ const struct acl_subject_label *subj, char **path, const int checkglob)
71636+{
71637+ struct acl_subject_label *tmpsubj;
71638+ struct acl_object_label *retval;
71639+ struct acl_object_label *retval2;
71640+
71641+ tmpsubj = (struct acl_subject_label *) subj;
71642+ read_lock(&gr_inode_lock);
71643+ do {
71644+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
71645+ if (retval) {
71646+ if (checkglob && retval->globbed) {
71647+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
71648+ if (retval2)
71649+ retval = retval2;
71650+ }
71651+ break;
71652+ }
71653+ } while ((tmpsubj = tmpsubj->parent_subject));
71654+ read_unlock(&gr_inode_lock);
71655+
71656+ return retval;
71657+}
71658+
71659+static struct acl_object_label *
71660+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71661+ struct dentry *curr_dentry,
71662+ const struct acl_subject_label *subj, char **path, const int checkglob)
71663+{
71664+ int newglob = checkglob;
71665+ u64 inode;
71666+ dev_t device;
71667+
71668+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
71669+ as we don't want a / * rule to match instead of the / object
71670+ don't do this for create lookups that call this function though, since they're looking up
71671+ on the parent and thus need globbing checks on all paths
71672+ */
71673+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
71674+ newglob = GR_NO_GLOB;
71675+
71676+ spin_lock(&curr_dentry->d_lock);
71677+ inode = __get_ino(curr_dentry);
71678+ device = __get_dev(curr_dentry);
71679+ spin_unlock(&curr_dentry->d_lock);
71680+
71681+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
71682+}
71683+
71684+#ifdef CONFIG_HUGETLBFS
71685+static inline bool
71686+is_hugetlbfs_mnt(const struct vfsmount *mnt)
71687+{
71688+ int i;
71689+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
71690+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
71691+ return true;
71692+ }
71693+
71694+ return false;
71695+}
71696+#endif
71697+
71698+static struct acl_object_label *
71699+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71700+ const struct acl_subject_label *subj, char *path, const int checkglob)
71701+{
71702+ struct dentry *dentry = (struct dentry *) l_dentry;
71703+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71704+ struct mount *real_mnt = real_mount(mnt);
71705+ struct acl_object_label *retval;
71706+ struct dentry *parent;
71707+
71708+ read_seqlock_excl(&mount_lock);
71709+ write_seqlock(&rename_lock);
71710+
71711+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
71712+#ifdef CONFIG_NET
71713+ mnt == sock_mnt ||
71714+#endif
71715+#ifdef CONFIG_HUGETLBFS
71716+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
71717+#endif
71718+ /* ignore Eric Biederman */
71719+ IS_PRIVATE(l_dentry->d_inode))) {
71720+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
71721+ goto out;
71722+ }
71723+
71724+ for (;;) {
71725+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71726+ break;
71727+
71728+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71729+ if (!mnt_has_parent(real_mnt))
71730+ break;
71731+
71732+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71733+ if (retval != NULL)
71734+ goto out;
71735+
71736+ dentry = real_mnt->mnt_mountpoint;
71737+ real_mnt = real_mnt->mnt_parent;
71738+ mnt = &real_mnt->mnt;
71739+ continue;
71740+ }
71741+
71742+ parent = dentry->d_parent;
71743+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71744+ if (retval != NULL)
71745+ goto out;
71746+
71747+ dentry = parent;
71748+ }
71749+
71750+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71751+
71752+ /* gr_real_root is pinned so we don't have to hold a reference */
71753+ if (retval == NULL)
71754+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
71755+out:
71756+ write_sequnlock(&rename_lock);
71757+ read_sequnlock_excl(&mount_lock);
71758+
71759+ BUG_ON(retval == NULL);
71760+
71761+ return retval;
71762+}
71763+
71764+static struct acl_object_label *
71765+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71766+ const struct acl_subject_label *subj)
71767+{
71768+ char *path = NULL;
71769+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
71770+}
71771+
71772+static struct acl_object_label *
71773+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71774+ const struct acl_subject_label *subj)
71775+{
71776+ char *path = NULL;
71777+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
71778+}
71779+
71780+static struct acl_object_label *
71781+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71782+ const struct acl_subject_label *subj, char *path)
71783+{
71784+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
71785+}
71786+
71787+struct acl_subject_label *
71788+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71789+ const struct acl_role_label *role)
71790+{
71791+ struct dentry *dentry = (struct dentry *) l_dentry;
71792+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71793+ struct mount *real_mnt = real_mount(mnt);
71794+ struct acl_subject_label *retval;
71795+ struct dentry *parent;
71796+
71797+ read_seqlock_excl(&mount_lock);
71798+ write_seqlock(&rename_lock);
71799+
71800+ for (;;) {
71801+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71802+ break;
71803+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71804+ if (!mnt_has_parent(real_mnt))
71805+ break;
71806+
71807+ spin_lock(&dentry->d_lock);
71808+ read_lock(&gr_inode_lock);
71809+ retval =
71810+ lookup_acl_subj_label(__get_ino(dentry),
71811+ __get_dev(dentry), role);
71812+ read_unlock(&gr_inode_lock);
71813+ spin_unlock(&dentry->d_lock);
71814+ if (retval != NULL)
71815+ goto out;
71816+
71817+ dentry = real_mnt->mnt_mountpoint;
71818+ real_mnt = real_mnt->mnt_parent;
71819+ mnt = &real_mnt->mnt;
71820+ continue;
71821+ }
71822+
71823+ spin_lock(&dentry->d_lock);
71824+ read_lock(&gr_inode_lock);
71825+ retval = lookup_acl_subj_label(__get_ino(dentry),
71826+ __get_dev(dentry), role);
71827+ read_unlock(&gr_inode_lock);
71828+ parent = dentry->d_parent;
71829+ spin_unlock(&dentry->d_lock);
71830+
71831+ if (retval != NULL)
71832+ goto out;
71833+
71834+ dentry = parent;
71835+ }
71836+
71837+ spin_lock(&dentry->d_lock);
71838+ read_lock(&gr_inode_lock);
71839+ retval = lookup_acl_subj_label(__get_ino(dentry),
71840+ __get_dev(dentry), role);
71841+ read_unlock(&gr_inode_lock);
71842+ spin_unlock(&dentry->d_lock);
71843+
71844+ if (unlikely(retval == NULL)) {
71845+ /* gr_real_root is pinned, we don't need to hold a reference */
71846+ read_lock(&gr_inode_lock);
71847+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
71848+ __get_dev(gr_real_root.dentry), role);
71849+ read_unlock(&gr_inode_lock);
71850+ }
71851+out:
71852+ write_sequnlock(&rename_lock);
71853+ read_sequnlock_excl(&mount_lock);
71854+
71855+ BUG_ON(retval == NULL);
71856+
71857+ return retval;
71858+}
71859+
71860+void
71861+assign_special_role(const char *rolename)
71862+{
71863+ struct acl_object_label *obj;
71864+ struct acl_role_label *r;
71865+ struct acl_role_label *assigned = NULL;
71866+ struct task_struct *tsk;
71867+ struct file *filp;
71868+
71869+ FOR_EACH_ROLE_START(r)
71870+ if (!strcmp(rolename, r->rolename) &&
71871+ (r->roletype & GR_ROLE_SPECIAL)) {
71872+ assigned = r;
71873+ break;
71874+ }
71875+ FOR_EACH_ROLE_END(r)
71876+
71877+ if (!assigned)
71878+ return;
71879+
71880+ read_lock(&tasklist_lock);
71881+ read_lock(&grsec_exec_file_lock);
71882+
71883+ tsk = current->real_parent;
71884+ if (tsk == NULL)
71885+ goto out_unlock;
71886+
71887+ filp = tsk->exec_file;
71888+ if (filp == NULL)
71889+ goto out_unlock;
71890+
71891+ tsk->is_writable = 0;
71892+ tsk->inherited = 0;
71893+
71894+ tsk->acl_sp_role = 1;
71895+ tsk->acl_role_id = ++acl_sp_role_value;
71896+ tsk->role = assigned;
71897+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
71898+
71899+ /* ignore additional mmap checks for processes that are writable
71900+ by the default ACL */
71901+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71902+ if (unlikely(obj->mode & GR_WRITE))
71903+ tsk->is_writable = 1;
71904+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
71905+ if (unlikely(obj->mode & GR_WRITE))
71906+ tsk->is_writable = 1;
71907+
71908+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71909+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
71910+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
71911+#endif
71912+
71913+out_unlock:
71914+ read_unlock(&grsec_exec_file_lock);
71915+ read_unlock(&tasklist_lock);
71916+ return;
71917+}
71918+
71919+
71920+static void
71921+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
71922+{
71923+ struct task_struct *task = current;
71924+ const struct cred *cred = current_cred();
71925+
71926+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
71927+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71928+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71929+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
71930+
71931+ return;
71932+}
71933+
71934+static void
71935+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
71936+{
71937+ struct task_struct *task = current;
71938+ const struct cred *cred = current_cred();
71939+
71940+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71941+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71942+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71943+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
71944+
71945+ return;
71946+}
71947+
71948+static void
71949+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
71950+{
71951+ struct task_struct *task = current;
71952+ const struct cred *cred = current_cred();
71953+
71954+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71955+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71956+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71957+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
71958+
71959+ return;
71960+}
71961+
71962+static void
71963+gr_set_proc_res(struct task_struct *task)
71964+{
71965+ struct acl_subject_label *proc;
71966+ unsigned short i;
71967+
71968+ proc = task->acl;
71969+
71970+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
71971+ return;
71972+
71973+ for (i = 0; i < RLIM_NLIMITS; i++) {
71974+ unsigned long rlim_cur, rlim_max;
71975+
71976+ if (!(proc->resmask & (1U << i)))
71977+ continue;
71978+
71979+ rlim_cur = proc->res[i].rlim_cur;
71980+ rlim_max = proc->res[i].rlim_max;
71981+
71982+ if (i == RLIMIT_NOFILE) {
71983+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
71984+ if (rlim_cur > saved_sysctl_nr_open)
71985+ rlim_cur = saved_sysctl_nr_open;
71986+ if (rlim_max > saved_sysctl_nr_open)
71987+ rlim_max = saved_sysctl_nr_open;
71988+ }
71989+
71990+ task->signal->rlim[i].rlim_cur = rlim_cur;
71991+ task->signal->rlim[i].rlim_max = rlim_max;
71992+
71993+ if (i == RLIMIT_CPU)
71994+ update_rlimit_cpu(task, rlim_cur);
71995+ }
71996+
71997+ return;
71998+}
71999+
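+/*
+ * Illustrative sketch, not from the patch: the resmask/clamp pattern used
+ * by gr_set_proc_res() above, as hypothetical userspace code (toy_* names
+ * invented, #if 0 so it is never built).  A limit is applied only when its
+ * policy bit is set, and the NOFILE pair is clamped to the nr_open ceiling
+ * sampled once, so a stale policy cannot exceed it.
+ */
+#if 0
+#define TOY_NLIMITS 16
+#define TOY_NOFILE   7
+
+struct toy_res { unsigned long cur, max; };
+
+static void toy_apply(unsigned int resmask, const struct toy_res *pol,
+		      struct toy_res *out, unsigned long nr_open)
+{
+	unsigned short i;
+
+	for (i = 0; i < TOY_NLIMITS; i++) {
+		unsigned long cur, max;
+
+		if (!(resmask & (1U << i)))
+			continue;	/* this limit not set by policy */
+
+		cur = pol[i].cur;
+		max = pol[i].max;
+		if (i == TOY_NOFILE) {	/* clamp like RLIMIT_NOFILE */
+			if (cur > nr_open)
+				cur = nr_open;
+			if (max > nr_open)
+				max = nr_open;
+		}
+		out[i].cur = cur;
+		out[i].max = max;
+	}
+}
+#endif
+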
 72000+/* both of the functions below must be called with
 72001+	rcu_read_lock();
 72002+	read_lock(&tasklist_lock);
 72003+	read_lock(&grsec_exec_file_lock);
 72004+   held, except when called from gr_set_role_label() (via __gr_get_subject_for_task)
 72005+*/
72006+
72007+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
72008+{
72009+ char *tmpname;
72010+ struct acl_subject_label *tmpsubj;
72011+ struct file *filp;
72012+ struct name_entry *nmatch;
72013+
72014+ filp = task->exec_file;
72015+ if (filp == NULL)
72016+ return NULL;
72017+
 72018+	/* the following applies the correct subject
 72019+	   to binaries that were already running when the RBAC system
 72020+	   was enabled and whose files have been
 72021+	   replaced or deleted since their execution
 72022+	   -----
 72023+	   when the RBAC system starts, the inode/dev
 72024+	   from exec_file will be one the RBAC system
 72025+	   is unaware of; it only knows the inode/dev
 72026+	   of the file currently present on disk, or the absence
 72027+	   of it.
 72028+	   */
72029+
72030+ if (filename)
72031+ nmatch = __lookup_name_entry(state, filename);
72032+ else {
72033+ preempt_disable();
72034+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
72035+
72036+ nmatch = __lookup_name_entry(state, tmpname);
72037+ preempt_enable();
72038+ }
72039+ tmpsubj = NULL;
72040+ if (nmatch) {
72041+ if (nmatch->deleted)
72042+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
72043+ else
72044+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
72045+ }
72046+ /* this also works for the reload case -- if we don't match a potentially inherited subject
72047+ then we fall back to a normal lookup based on the binary's ino/dev
72048+ */
72049+ if (tmpsubj == NULL && fallback)
72050+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
72051+
72052+ return tmpsubj;
72053+}
72054+
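+/*
+ * Illustrative sketch, not from the patch: the resolution order implemented
+ * above, with hypothetical toy_* types standing in for name_entry and
+ * acl_subject_label (#if 0, never built).  A name-based match takes
+ * priority because it survives the binary being replaced or deleted; the
+ * ino/dev lookup is only a fallback.
+ */
+#if 0
+struct toy_subj { const char *filename; };
+struct toy_name {
+	int deleted;
+	struct toy_subj *deleted_subj;	/* keyed by the recorded old ino/dev */
+	struct toy_subj *live_subj;	/* keyed by the current ino/dev */
+};
+
+static struct toy_subj *toy_resolve(struct toy_name *nm,
+				    struct toy_subj *disk_subj, int fallback)
+{
+	struct toy_subj *s = NULL;
+
+	if (nm)
+		s = nm->deleted ? nm->deleted_subj : nm->live_subj;
+	if (s == NULL && fallback)
+		s = disk_subj;		/* normal lookup by what's on disk */
+	return s;
+}
+#endif
+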
72055+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
72056+{
72057+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
72058+}
72059+
72060+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
72061+{
72062+ struct acl_object_label *obj;
72063+ struct file *filp;
72064+
72065+ filp = task->exec_file;
72066+
72067+ task->acl = subj;
72068+ task->is_writable = 0;
72069+ /* ignore additional mmap checks for processes that are writable
72070+ by the default ACL */
72071+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
72072+ if (unlikely(obj->mode & GR_WRITE))
72073+ task->is_writable = 1;
72074+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72075+ if (unlikely(obj->mode & GR_WRITE))
72076+ task->is_writable = 1;
72077+
72078+ gr_set_proc_res(task);
72079+
72080+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72081+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72082+#endif
72083+}
72084+
72085+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
72086+{
72087+ __gr_apply_subject_to_task(&running_polstate, task, subj);
72088+}
72089+
72090+__u32
72091+gr_search_file(const struct dentry * dentry, const __u32 mode,
72092+ const struct vfsmount * mnt)
72093+{
72094+ __u32 retval = mode;
72095+ struct acl_subject_label *curracl;
72096+ struct acl_object_label *currobj;
72097+
72098+ if (unlikely(!(gr_status & GR_READY)))
72099+ return (mode & ~GR_AUDITS);
72100+
72101+ curracl = current->acl;
72102+
72103+ currobj = chk_obj_label(dentry, mnt, curracl);
72104+ retval = currobj->mode & mode;
72105+
72106+ /* if we're opening a specified transfer file for writing
72107+ (e.g. /dev/initctl), then transfer our role to init
72108+ */
72109+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
72110+ current->role->roletype & GR_ROLE_PERSIST)) {
72111+ struct task_struct *task = init_pid_ns.child_reaper;
72112+
72113+ if (task->role != current->role) {
72114+ struct acl_subject_label *subj;
72115+
72116+ task->acl_sp_role = 0;
72117+ task->acl_role_id = current->acl_role_id;
72118+ task->role = current->role;
72119+ rcu_read_lock();
72120+ read_lock(&grsec_exec_file_lock);
72121+ subj = gr_get_subject_for_task(task, NULL, 1);
72122+ gr_apply_subject_to_task(task, subj);
72123+ read_unlock(&grsec_exec_file_lock);
72124+ rcu_read_unlock();
72125+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
72126+ }
72127+ }
72128+
72129+ if (unlikely
72130+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
72131+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
72132+ __u32 new_mode = mode;
72133+
72134+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
72135+
72136+ retval = new_mode;
72137+
72138+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
72139+ new_mode |= GR_INHERIT;
72140+
72141+ if (!(mode & GR_NOLEARN))
72142+ gr_log_learn(dentry, mnt, new_mode);
72143+ }
72144+
72145+ return retval;
72146+}
72147+
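+/*
+ * Illustrative sketch, not from the patch: a simplified version of the
+ * grant/learn decision made by gr_search_file() above, with hypothetical
+ * T_* bits standing in for the GR_* flags (#if 0, never built).  Normally
+ * the reply is the intersection of the request and the object's mode; a
+ * learning subject instead gets the full request (minus audit/suppress
+ * bits) and the shortfall is logged for policy generation.
+ */
+#if 0
+#define T_AUDITS   0x10
+#define T_SUPPRESS 0x20
+#define T_LEARN    0x40
+
+static unsigned int toy_search(unsigned int subj_mode, unsigned int obj_mode,
+			       unsigned int req)
+{
+	unsigned int granted = obj_mode & req;
+
+	if ((subj_mode & T_LEARN) &&
+	    granted != (req & ~(T_AUDITS | T_SUPPRESS))) {
+		/* gr_log_learn() would record the needed access here */
+		granted = req & ~(T_AUDITS | T_SUPPRESS);
+	}
+	return granted;
+}
+#endif
+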
72148+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
72149+ const struct dentry *parent,
72150+ const struct vfsmount *mnt)
72151+{
72152+ struct name_entry *match;
72153+ struct acl_object_label *matchpo;
72154+ struct acl_subject_label *curracl;
72155+ char *path;
72156+
72157+ if (unlikely(!(gr_status & GR_READY)))
72158+ return NULL;
72159+
72160+ preempt_disable();
72161+ path = gr_to_filename_rbac(new_dentry, mnt);
72162+ match = lookup_name_entry_create(path);
72163+
72164+ curracl = current->acl;
72165+
72166+ if (match) {
72167+ read_lock(&gr_inode_lock);
72168+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
72169+ read_unlock(&gr_inode_lock);
72170+
72171+ if (matchpo) {
72172+ preempt_enable();
72173+ return matchpo;
72174+ }
72175+ }
72176+
72177+ // lookup parent
72178+
72179+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
72180+
72181+ preempt_enable();
72182+ return matchpo;
72183+}
72184+
72185+__u32
72186+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
72187+ const struct vfsmount * mnt, const __u32 mode)
72188+{
72189+ struct acl_object_label *matchpo;
72190+ __u32 retval;
72191+
72192+ if (unlikely(!(gr_status & GR_READY)))
72193+ return (mode & ~GR_AUDITS);
72194+
72195+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
72196+
72197+ retval = matchpo->mode & mode;
72198+
72199+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
72200+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72201+ __u32 new_mode = mode;
72202+
72203+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
72204+
72205+ gr_log_learn(new_dentry, mnt, new_mode);
72206+ return new_mode;
72207+ }
72208+
72209+ return retval;
72210+}
72211+
72212+__u32
72213+gr_check_link(const struct dentry * new_dentry,
72214+ const struct dentry * parent_dentry,
72215+ const struct vfsmount * parent_mnt,
72216+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
72217+{
72218+ struct acl_object_label *obj;
72219+ __u32 oldmode, newmode;
72220+ __u32 needmode;
72221+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
72222+ GR_DELETE | GR_INHERIT;
72223+
72224+ if (unlikely(!(gr_status & GR_READY)))
72225+ return (GR_CREATE | GR_LINK);
72226+
72227+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
72228+ oldmode = obj->mode;
72229+
72230+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
72231+ newmode = obj->mode;
72232+
72233+ needmode = newmode & checkmodes;
72234+
72235+ // old name for hardlink must have at least the permissions of the new name
72236+ if ((oldmode & needmode) != needmode)
72237+ goto bad;
72238+
72239+ // if old name had restrictions/auditing, make sure the new name does as well
72240+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
72241+
72242+ // don't allow hardlinking of suid/sgid/fcapped files without permission
72243+ if (is_privileged_binary(old_dentry))
72244+ needmode |= GR_SETID;
72245+
72246+ if ((newmode & needmode) != needmode)
72247+ goto bad;
72248+
72249+ // enforce minimum permissions
72250+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
72251+ return newmode;
72252+bad:
72253+ needmode = oldmode;
72254+ if (is_privileged_binary(old_dentry))
72255+ needmode |= GR_SETID;
72256+
72257+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72258+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
72259+ return (GR_CREATE | GR_LINK);
72260+ } else if (newmode & GR_SUPPRESS)
72261+ return GR_SUPPRESS;
72262+ else
72263+ return 0;
72264+}
72265+
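+/*
+ * Illustrative sketch, not from the patch: a simplified version of the
+ * two-way hardlink invariant gr_check_link() enforces above, using
+ * hypothetical T_* bits (#if 0, never built).  The new name must not grant
+ * anything the old name lacked, and privileged (suid/sgid/fcapped) files
+ * need an explicit SETID grant on the new name.
+ */
+#if 0
+#define T_READ   0x01
+#define T_WRITE  0x02
+#define T_EXEC   0x04
+#define T_SETID  0x08
+#define T_CREATE 0x10
+#define T_LINK   0x20
+
+static int toy_link_ok(unsigned int oldmode, unsigned int newmode,
+		       int privileged)
+{
+	unsigned int need = newmode & (T_READ | T_WRITE | T_EXEC | T_SETID);
+
+	if ((oldmode & need) != need)
+		return 0;	/* new name would widen access to the inode */
+	if (privileged && !(newmode & T_SETID))
+		return 0;	/* no explicit grant for a privileged binary */
+	/* finally, the target location must allow both create and link */
+	return (newmode & (T_CREATE | T_LINK)) == (T_CREATE | T_LINK);
+}
+#endif
+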
72266+int
72267+gr_check_hidden_task(const struct task_struct *task)
72268+{
72269+ if (unlikely(!(gr_status & GR_READY)))
72270+ return 0;
72271+
72272+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
72273+ return 1;
72274+
72275+ return 0;
72276+}
72277+
72278+int
72279+gr_check_protected_task(const struct task_struct *task)
72280+{
72281+ if (unlikely(!(gr_status & GR_READY) || !task))
72282+ return 0;
72283+
72284+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72285+ task->acl != current->acl)
72286+ return 1;
72287+
72288+ return 0;
72289+}
72290+
72291+int
72292+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
72293+{
72294+ struct task_struct *p;
72295+ int ret = 0;
72296+
72297+ if (unlikely(!(gr_status & GR_READY) || !pid))
72298+ return ret;
72299+
72300+ read_lock(&tasklist_lock);
72301+ do_each_pid_task(pid, type, p) {
72302+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
72303+ p->acl != current->acl) {
72304+ ret = 1;
72305+ goto out;
72306+ }
72307+ } while_each_pid_task(pid, type, p);
72308+out:
72309+ read_unlock(&tasklist_lock);
72310+
72311+ return ret;
72312+}
72313+
72314+void
72315+gr_copy_label(struct task_struct *tsk)
72316+{
72317+ struct task_struct *p = current;
72318+
72319+ tsk->inherited = p->inherited;
72320+ tsk->acl_sp_role = 0;
72321+ tsk->acl_role_id = p->acl_role_id;
72322+ tsk->acl = p->acl;
72323+ tsk->role = p->role;
72324+ tsk->signal->used_accept = 0;
72325+ tsk->signal->curr_ip = p->signal->curr_ip;
72326+ tsk->signal->saved_ip = p->signal->saved_ip;
72327+ if (p->exec_file)
72328+ get_file(p->exec_file);
72329+ tsk->exec_file = p->exec_file;
72330+ tsk->is_writable = p->is_writable;
72331+ if (unlikely(p->signal->used_accept)) {
72332+ p->signal->curr_ip = 0;
72333+ p->signal->saved_ip = 0;
72334+ }
72335+
72336+ return;
72337+}
72338+
72339+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
72340+
72341+int
72342+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
72343+{
72344+ unsigned int i;
72345+ __u16 num;
72346+ uid_t *uidlist;
72347+ uid_t curuid;
72348+ int realok = 0;
72349+ int effectiveok = 0;
72350+ int fsok = 0;
72351+ uid_t globalreal, globaleffective, globalfs;
72352+
72353+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
72354+ struct user_struct *user;
72355+
72356+ if (!uid_valid(real))
72357+ goto skipit;
72358+
72359+ /* find user based on global namespace */
72360+
72361+ globalreal = GR_GLOBAL_UID(real);
72362+
72363+ user = find_user(make_kuid(&init_user_ns, globalreal));
72364+ if (user == NULL)
72365+ goto skipit;
72366+
72367+ if (gr_process_kernel_setuid_ban(user)) {
72368+ /* for find_user */
72369+ free_uid(user);
72370+ return 1;
72371+ }
72372+
72373+ /* for find_user */
72374+ free_uid(user);
72375+
72376+skipit:
72377+#endif
72378+
72379+ if (unlikely(!(gr_status & GR_READY)))
72380+ return 0;
72381+
72382+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72383+ gr_log_learn_uid_change(real, effective, fs);
72384+
72385+ num = current->acl->user_trans_num;
72386+ uidlist = current->acl->user_transitions;
72387+
72388+ if (uidlist == NULL)
72389+ return 0;
72390+
72391+ if (!uid_valid(real)) {
72392+ realok = 1;
72393+ globalreal = (uid_t)-1;
72394+ } else {
72395+ globalreal = GR_GLOBAL_UID(real);
72396+ }
72397+ if (!uid_valid(effective)) {
72398+ effectiveok = 1;
72399+ globaleffective = (uid_t)-1;
72400+ } else {
72401+ globaleffective = GR_GLOBAL_UID(effective);
72402+ }
72403+ if (!uid_valid(fs)) {
72404+ fsok = 1;
72405+ globalfs = (uid_t)-1;
72406+ } else {
72407+ globalfs = GR_GLOBAL_UID(fs);
72408+ }
72409+
72410+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
72411+ for (i = 0; i < num; i++) {
72412+ curuid = uidlist[i];
72413+ if (globalreal == curuid)
72414+ realok = 1;
72415+ if (globaleffective == curuid)
72416+ effectiveok = 1;
72417+ if (globalfs == curuid)
72418+ fsok = 1;
72419+ }
72420+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
72421+ for (i = 0; i < num; i++) {
72422+ curuid = uidlist[i];
72423+ if (globalreal == curuid)
72424+ break;
72425+ if (globaleffective == curuid)
72426+ break;
72427+ if (globalfs == curuid)
72428+ break;
72429+ }
72430+ /* not in deny list */
72431+ if (i == num) {
72432+ realok = 1;
72433+ effectiveok = 1;
72434+ fsok = 1;
72435+ }
72436+ }
72437+
72438+ if (realok && effectiveok && fsok)
72439+ return 0;
72440+ else {
72441+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72442+ return 1;
72443+ }
72444+}
72445+
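+/*
+ * Illustrative sketch, not from the patch: the allow/deny transition-list
+ * semantics shared by gr_check_user_change() above and
+ * gr_check_group_change() below, as hypothetical userspace code (#if 0,
+ * never built).  An allow list must contain every id being switched to; a
+ * deny list must contain none of them.
+ */
+#if 0
+static int toy_trans_ok(const unsigned int *list, unsigned int n, int allow,
+			unsigned int real, unsigned int eff, unsigned int fs)
+{
+	unsigned int i;
+	int realok = 0, effok = 0, fsok = 0;
+
+	for (i = 0; i < n; i++) {
+		if (allow) {
+			if (list[i] == real)
+				realok = 1;
+			if (list[i] == eff)
+				effok = 1;
+			if (list[i] == fs)
+				fsok = 1;
+		} else if (list[i] == real || list[i] == eff ||
+			   list[i] == fs) {
+			return 0;	/* matched an entry in the deny list */
+		}
+	}
+	return allow ? (realok && effok && fsok) : 1;
+}
+#endif
+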
72446+int
72447+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
72448+{
72449+ unsigned int i;
72450+ __u16 num;
72451+ gid_t *gidlist;
72452+ gid_t curgid;
72453+ int realok = 0;
72454+ int effectiveok = 0;
72455+ int fsok = 0;
72456+ gid_t globalreal, globaleffective, globalfs;
72457+
72458+ if (unlikely(!(gr_status & GR_READY)))
72459+ return 0;
72460+
72461+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72462+ gr_log_learn_gid_change(real, effective, fs);
72463+
72464+ num = current->acl->group_trans_num;
72465+ gidlist = current->acl->group_transitions;
72466+
72467+ if (gidlist == NULL)
72468+ return 0;
72469+
72470+ if (!gid_valid(real)) {
72471+ realok = 1;
72472+ globalreal = (gid_t)-1;
72473+ } else {
72474+ globalreal = GR_GLOBAL_GID(real);
72475+ }
72476+ if (!gid_valid(effective)) {
72477+ effectiveok = 1;
72478+ globaleffective = (gid_t)-1;
72479+ } else {
72480+ globaleffective = GR_GLOBAL_GID(effective);
72481+ }
72482+ if (!gid_valid(fs)) {
72483+ fsok = 1;
72484+ globalfs = (gid_t)-1;
72485+ } else {
72486+ globalfs = GR_GLOBAL_GID(fs);
72487+ }
72488+
72489+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
72490+ for (i = 0; i < num; i++) {
72491+ curgid = gidlist[i];
72492+ if (globalreal == curgid)
72493+ realok = 1;
72494+ if (globaleffective == curgid)
72495+ effectiveok = 1;
72496+ if (globalfs == curgid)
72497+ fsok = 1;
72498+ }
72499+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
72500+ for (i = 0; i < num; i++) {
72501+ curgid = gidlist[i];
72502+ if (globalreal == curgid)
72503+ break;
72504+ if (globaleffective == curgid)
72505+ break;
72506+ if (globalfs == curgid)
72507+ break;
72508+ }
72509+ /* not in deny list */
72510+ if (i == num) {
72511+ realok = 1;
72512+ effectiveok = 1;
72513+ fsok = 1;
72514+ }
72515+ }
72516+
72517+ if (realok && effectiveok && fsok)
72518+ return 0;
72519+ else {
72520+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
72521+ return 1;
72522+ }
72523+}
72524+
72525+extern int gr_acl_is_capable(const int cap);
72526+
72527+void
72528+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
72529+{
72530+ struct acl_role_label *role = task->role;
72531+ struct acl_role_label *origrole = role;
72532+ struct acl_subject_label *subj = NULL;
72533+ struct acl_object_label *obj;
72534+ struct file *filp;
72535+ uid_t uid;
72536+ gid_t gid;
72537+
72538+ if (unlikely(!(gr_status & GR_READY)))
72539+ return;
72540+
72541+ uid = GR_GLOBAL_UID(kuid);
72542+ gid = GR_GLOBAL_GID(kgid);
72543+
72544+ filp = task->exec_file;
72545+
72546+ /* kernel process, we'll give them the kernel role */
72547+ if (unlikely(!filp)) {
72548+ task->role = running_polstate.kernel_role;
72549+ task->acl = running_polstate.kernel_role->root_label;
72550+ return;
72551+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
72552+ /* save the current ip at time of role lookup so that the proper
72553+ IP will be learned for role_allowed_ip */
72554+ task->signal->saved_ip = task->signal->curr_ip;
72555+ role = lookup_acl_role_label(task, uid, gid);
72556+ }
72557+
72558+ /* don't change the role if we're not a privileged process */
72559+ if (role && task->role != role &&
72560+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
72561+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
72562+ return;
72563+
72564+ task->role = role;
72565+
72566+ if (task->inherited) {
72567+ /* if we reached our subject through inheritance, then first see
72568+ if there's a subject of the same name in the new role that has
72569+ an object that would result in the same inherited subject
72570+ */
72571+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
72572+ if (subj) {
72573+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
72574+ if (!(obj->mode & GR_INHERIT))
72575+ subj = NULL;
72576+ }
72577+
72578+ }
72579+ if (subj == NULL) {
72580+ /* otherwise:
72581+ perform subject lookup in possibly new role
72582+ we can use this result below in the case where role == task->role
72583+ */
72584+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
72585+ }
72586+
 72587+	/* if we changed uid/gid but ended up in the same role
 72588+	   and are using inheritance, don't lose the inherited subject:
 72589+	   if the current subject differs from what a normal lookup
 72590+	   would produce, we arrived at it via inheritance, so don't
 72591+	   lose that subject
 72592+	*/
72593+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
72594+ (subj == task->acl)))
72595+ task->acl = subj;
72596+
72597+ /* leave task->inherited unaffected */
72598+
72599+ task->is_writable = 0;
72600+
72601+ /* ignore additional mmap checks for processes that are writable
72602+ by the default ACL */
72603+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72604+ if (unlikely(obj->mode & GR_WRITE))
72605+ task->is_writable = 1;
72606+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
72607+ if (unlikely(obj->mode & GR_WRITE))
72608+ task->is_writable = 1;
72609+
72610+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72611+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72612+#endif
72613+
72614+ gr_set_proc_res(task);
72615+
72616+ return;
72617+}
72618+
72619+int
72620+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72621+ const int unsafe_flags)
72622+{
72623+ struct task_struct *task = current;
72624+ struct acl_subject_label *newacl;
72625+ struct acl_object_label *obj;
72626+ __u32 retmode;
72627+
72628+ if (unlikely(!(gr_status & GR_READY)))
72629+ return 0;
72630+
72631+ newacl = chk_subj_label(dentry, mnt, task->role);
72632+
 72633+	/* special handling for the case where an admin role ran strace -f -p <pid>
 72634+	   and the traced pid then did an exec
 72635+	*/
72636+ rcu_read_lock();
72637+ read_lock(&tasklist_lock);
72638+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
72639+ (task->parent->acl->mode & GR_POVERRIDE))) {
72640+ read_unlock(&tasklist_lock);
72641+ rcu_read_unlock();
72642+ goto skip_check;
72643+ }
72644+ read_unlock(&tasklist_lock);
72645+ rcu_read_unlock();
72646+
72647+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
72648+ !(task->role->roletype & GR_ROLE_GOD) &&
72649+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
72650+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72651+ if (unsafe_flags & LSM_UNSAFE_SHARE)
72652+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
72653+ else
72654+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
72655+ return -EACCES;
72656+ }
72657+
72658+skip_check:
72659+
72660+ obj = chk_obj_label(dentry, mnt, task->acl);
72661+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
72662+
72663+ if (!(task->acl->mode & GR_INHERITLEARN) &&
72664+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
72665+ if (obj->nested)
72666+ task->acl = obj->nested;
72667+ else
72668+ task->acl = newacl;
72669+ task->inherited = 0;
72670+ } else {
72671+ task->inherited = 1;
72672+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
72673+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
72674+ }
72675+
72676+ task->is_writable = 0;
72677+
72678+ /* ignore additional mmap checks for processes that are writable
72679+ by the default ACL */
72680+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
72681+ if (unlikely(obj->mode & GR_WRITE))
72682+ task->is_writable = 1;
72683+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
72684+ if (unlikely(obj->mode & GR_WRITE))
72685+ task->is_writable = 1;
72686+
72687+ gr_set_proc_res(task);
72688+
72689+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72690+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72691+#endif
72692+ return 0;
72693+}
72694+
72695+/* always called with valid inodev ptr */
72696+static void
72697+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
72698+{
72699+ struct acl_object_label *matchpo;
72700+ struct acl_subject_label *matchps;
72701+ struct acl_subject_label *subj;
72702+ struct acl_role_label *role;
72703+ unsigned int x;
72704+
72705+ FOR_EACH_ROLE_START(role)
72706+ FOR_EACH_SUBJECT_START(role, subj, x)
72707+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72708+ matchpo->mode |= GR_DELETED;
72709+ FOR_EACH_SUBJECT_END(subj,x)
72710+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72711+ /* nested subjects aren't in the role's subj_hash table */
72712+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72713+ matchpo->mode |= GR_DELETED;
72714+ FOR_EACH_NESTED_SUBJECT_END(subj)
72715+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
72716+ matchps->mode |= GR_DELETED;
72717+ FOR_EACH_ROLE_END(role)
72718+
72719+ inodev->nentry->deleted = 1;
72720+
72721+ return;
72722+}
72723+
72724+void
72725+gr_handle_delete(const u64 ino, const dev_t dev)
72726+{
72727+ struct inodev_entry *inodev;
72728+
72729+ if (unlikely(!(gr_status & GR_READY)))
72730+ return;
72731+
72732+ write_lock(&gr_inode_lock);
72733+ inodev = lookup_inodev_entry(ino, dev);
72734+ if (inodev != NULL)
72735+ do_handle_delete(inodev, ino, dev);
72736+ write_unlock(&gr_inode_lock);
72737+
72738+ return;
72739+}
72740+
72741+static void
72742+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
72743+ const u64 newinode, const dev_t newdevice,
72744+ struct acl_subject_label *subj)
72745+{
72746+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
72747+ struct acl_object_label *match;
72748+
72749+ match = subj->obj_hash[index];
72750+
72751+ while (match && (match->inode != oldinode ||
72752+ match->device != olddevice ||
72753+ !(match->mode & GR_DELETED)))
72754+ match = match->next;
72755+
72756+ if (match && (match->inode == oldinode)
72757+ && (match->device == olddevice)
72758+ && (match->mode & GR_DELETED)) {
72759+ if (match->prev == NULL) {
72760+ subj->obj_hash[index] = match->next;
72761+ if (match->next != NULL)
72762+ match->next->prev = NULL;
72763+ } else {
72764+ match->prev->next = match->next;
72765+ if (match->next != NULL)
72766+ match->next->prev = match->prev;
72767+ }
72768+ match->prev = NULL;
72769+ match->next = NULL;
72770+ match->inode = newinode;
72771+ match->device = newdevice;
72772+ match->mode &= ~GR_DELETED;
72773+
72774+ insert_acl_obj_label(match, subj);
72775+ }
72776+
72777+ return;
72778+}
72779+
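+/*
+ * Illustrative sketch, not from the patch: the unlink step shared by this
+ * routine and the two near-identical ones below (update_acl_subj_label,
+ * update_inodev_entry), on a hypothetical toy_node type (#if 0, never
+ * built).  A matching deleted entry is cut out of its doubly linked hash
+ * chain; the caller then rewrites its ino/dev key and re-inserts it.
+ */
+#if 0
+struct toy_node { struct toy_node *prev, *next; };
+
+static void toy_unlink(struct toy_node **head, struct toy_node *m)
+{
+	if (m->prev == NULL) {
+		*head = m->next;	/* m was the head of the chain */
+		if (m->next)
+			m->next->prev = NULL;
+	} else {
+		m->prev->next = m->next;
+		if (m->next)
+			m->next->prev = m->prev;
+	}
+	m->prev = NULL;
+	m->next = NULL;		/* node is now free for re-insertion */
+}
+#endif
+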
72780+static void
72781+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
72782+ const u64 newinode, const dev_t newdevice,
72783+ struct acl_role_label *role)
72784+{
72785+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
72786+ struct acl_subject_label *match;
72787+
72788+ match = role->subj_hash[index];
72789+
72790+ while (match && (match->inode != oldinode ||
72791+ match->device != olddevice ||
72792+ !(match->mode & GR_DELETED)))
72793+ match = match->next;
72794+
72795+ if (match && (match->inode == oldinode)
72796+ && (match->device == olddevice)
72797+ && (match->mode & GR_DELETED)) {
72798+ if (match->prev == NULL) {
72799+ role->subj_hash[index] = match->next;
72800+ if (match->next != NULL)
72801+ match->next->prev = NULL;
72802+ } else {
72803+ match->prev->next = match->next;
72804+ if (match->next != NULL)
72805+ match->next->prev = match->prev;
72806+ }
72807+ match->prev = NULL;
72808+ match->next = NULL;
72809+ match->inode = newinode;
72810+ match->device = newdevice;
72811+ match->mode &= ~GR_DELETED;
72812+
72813+ insert_acl_subj_label(match, role);
72814+ }
72815+
72816+ return;
72817+}
72818+
72819+static void
72820+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
72821+ const u64 newinode, const dev_t newdevice)
72822+{
72823+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
72824+ struct inodev_entry *match;
72825+
72826+ match = running_polstate.inodev_set.i_hash[index];
72827+
72828+ while (match && (match->nentry->inode != oldinode ||
72829+ match->nentry->device != olddevice || !match->nentry->deleted))
72830+ match = match->next;
72831+
72832+ if (match && (match->nentry->inode == oldinode)
72833+ && (match->nentry->device == olddevice) &&
72834+ match->nentry->deleted) {
72835+ if (match->prev == NULL) {
72836+ running_polstate.inodev_set.i_hash[index] = match->next;
72837+ if (match->next != NULL)
72838+ match->next->prev = NULL;
72839+ } else {
72840+ match->prev->next = match->next;
72841+ if (match->next != NULL)
72842+ match->next->prev = match->prev;
72843+ }
72844+ match->prev = NULL;
72845+ match->next = NULL;
72846+ match->nentry->inode = newinode;
72847+ match->nentry->device = newdevice;
72848+ match->nentry->deleted = 0;
72849+
72850+ insert_inodev_entry(match);
72851+ }
72852+
72853+ return;
72854+}
72855+
72856+static void
72857+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
72858+{
72859+ struct acl_subject_label *subj;
72860+ struct acl_role_label *role;
72861+ unsigned int x;
72862+
72863+ FOR_EACH_ROLE_START(role)
72864+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
72865+
72866+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72867+ if ((subj->inode == ino) && (subj->device == dev)) {
72868+ subj->inode = ino;
72869+ subj->device = dev;
72870+ }
72871+ /* nested subjects aren't in the role's subj_hash table */
72872+ update_acl_obj_label(matchn->inode, matchn->device,
72873+ ino, dev, subj);
72874+ FOR_EACH_NESTED_SUBJECT_END(subj)
72875+ FOR_EACH_SUBJECT_START(role, subj, x)
72876+ update_acl_obj_label(matchn->inode, matchn->device,
72877+ ino, dev, subj);
72878+ FOR_EACH_SUBJECT_END(subj,x)
72879+ FOR_EACH_ROLE_END(role)
72880+
72881+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
72882+
72883+ return;
72884+}
72885+
72886+static void
72887+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
72888+ const struct vfsmount *mnt)
72889+{
72890+ u64 ino = __get_ino(dentry);
72891+ dev_t dev = __get_dev(dentry);
72892+
72893+ __do_handle_create(matchn, ino, dev);
72894+
72895+ return;
72896+}
72897+
72898+void
72899+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72900+{
72901+ struct name_entry *matchn;
72902+
72903+ if (unlikely(!(gr_status & GR_READY)))
72904+ return;
72905+
72906+ preempt_disable();
72907+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
72908+
72909+ if (unlikely((unsigned long)matchn)) {
72910+ write_lock(&gr_inode_lock);
72911+ do_handle_create(matchn, dentry, mnt);
72912+ write_unlock(&gr_inode_lock);
72913+ }
72914+ preempt_enable();
72915+
72916+ return;
72917+}
72918+
72919+void
72920+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72921+{
72922+ struct name_entry *matchn;
72923+
72924+ if (unlikely(!(gr_status & GR_READY)))
72925+ return;
72926+
72927+ preempt_disable();
72928+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
72929+
72930+ if (unlikely((unsigned long)matchn)) {
72931+ write_lock(&gr_inode_lock);
72932+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
72933+ write_unlock(&gr_inode_lock);
72934+ }
72935+ preempt_enable();
72936+
72937+ return;
72938+}
72939+
72940+void
72941+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72942+ struct dentry *old_dentry,
72943+ struct dentry *new_dentry,
72944+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
72945+{
72946+ struct name_entry *matchn;
72947+ struct name_entry *matchn2 = NULL;
72948+ struct inodev_entry *inodev;
72949+ struct inode *inode = new_dentry->d_inode;
72950+ u64 old_ino = __get_ino(old_dentry);
72951+ dev_t old_dev = __get_dev(old_dentry);
72952+ unsigned int exchange = flags & RENAME_EXCHANGE;
72953+
72954+ /* vfs_rename swaps the name and parent link for old_dentry and
72955+ new_dentry
72956+ at this point, old_dentry has the new name, parent link, and inode
72957+ for the renamed file
72958+ if a file is being replaced by a rename, new_dentry has the inode
72959+ and name for the replaced file
72960+ */
72961+
72962+ if (unlikely(!(gr_status & GR_READY)))
72963+ return;
72964+
72965+ preempt_disable();
72966+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
72967+
72968+ /* exchange cases:
72969+ a filename exists for the source, but not dest
72970+ do a recreate on source
72971+ a filename exists for the dest, but not source
72972+ do a recreate on dest
72973+ a filename exists for both source and dest
72974+ delete source and dest, then create source and dest
72975+ a filename exists for neither source nor dest
72976+ no updates needed
72977+
72978+ the name entry lookups get us the old inode/dev associated with
72979+ each name, so do the deletes first (if possible) so that when
72980+ we do the create, we pick up on the right entries
72981+ */
72982+
72983+ if (exchange)
72984+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
72985+
72986+ /* we wouldn't have to check d_inode if it weren't for
72987+ NFS silly-renaming
72988+ */
72989+
72990+ write_lock(&gr_inode_lock);
72991+ if (unlikely((replace || exchange) && inode)) {
72992+ u64 new_ino = __get_ino(new_dentry);
72993+ dev_t new_dev = __get_dev(new_dentry);
72994+
72995+ inodev = lookup_inodev_entry(new_ino, new_dev);
72996+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
72997+ do_handle_delete(inodev, new_ino, new_dev);
72998+ }
72999+
73000+ inodev = lookup_inodev_entry(old_ino, old_dev);
73001+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
73002+ do_handle_delete(inodev, old_ino, old_dev);
73003+
73004+ if (unlikely(matchn != NULL))
73005+ do_handle_create(matchn, old_dentry, mnt);
73006+
73007+ if (unlikely(matchn2 != NULL))
73008+ do_handle_create(matchn2, new_dentry, mnt);
73009+
73010+ write_unlock(&gr_inode_lock);
73011+ preempt_enable();
73012+
73013+ return;
73014+}
73015+
73016+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
73017+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
73018+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
73019+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
73020+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
73021+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
73022+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
73023+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
73024+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
73025+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
73026+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
73027+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
73028+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
73029+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
73030+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
73031+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
73032+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
73033+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
73034+};
73035+
73036+void
73037+gr_learn_resource(const struct task_struct *task,
73038+ const int res, const unsigned long wanted, const int gt)
73039+{
73040+ struct acl_subject_label *acl;
73041+ const struct cred *cred;
73042+
73043+ if (unlikely((gr_status & GR_READY) &&
73044+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
73045+ goto skip_reslog;
73046+
73047+ gr_log_resource(task, res, wanted, gt);
73048+skip_reslog:
73049+
73050+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
73051+ return;
73052+
73053+ acl = task->acl;
73054+
73055+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
73056+ !(acl->resmask & (1U << (unsigned short) res))))
73057+ return;
73058+
73059+ if (wanted >= acl->res[res].rlim_cur) {
73060+ unsigned long res_add;
73061+
73062+ res_add = wanted + res_learn_bumps[res];
73063+
73064+ acl->res[res].rlim_cur = res_add;
73065+
73066+ if (wanted > acl->res[res].rlim_max)
73067+ acl->res[res].rlim_max = res_add;
73068+
73069+ /* only log the subject filename, since resource logging is supported for
73070+ single-subject learning only */
73071+ rcu_read_lock();
73072+ cred = __task_cred(task);
73073+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73074+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
73075+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
73076+ "", (unsigned long) res, &task->signal->saved_ip);
73077+ rcu_read_unlock();
73078+ }
73079+
73080+ return;
73081+}
73082+EXPORT_SYMBOL_GPL(gr_learn_resource);
73083+#endif
73084+
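+/*
+ * Illustrative sketch, not from the patch: the learning bump applied by
+ * gr_learn_resource() above, as hypothetical userspace code (#if 0, never
+ * built).  When a watched task wants more than the learned soft limit,
+ * both limits are raised to wanted plus a per-resource headroom, so the
+ * generated policy is not razor thin.
+ */
+#if 0
+static void toy_learn(unsigned long *cur, unsigned long *max,
+		      unsigned long wanted, unsigned long bump)
+{
+	if (wanted >= *cur) {
+		unsigned long raised = wanted + bump;
+
+		*cur = raised;
+		if (wanted > *max)
+			*max = raised;
+	}
+}
+#endif
+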
73085+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
73086+void
73087+pax_set_initial_flags(struct linux_binprm *bprm)
73088+{
73089+ struct task_struct *task = current;
73090+ struct acl_subject_label *proc;
73091+ unsigned long flags;
73092+
73093+ if (unlikely(!(gr_status & GR_READY)))
73094+ return;
73095+
73096+ flags = pax_get_flags(task);
73097+
73098+ proc = task->acl;
73099+
73100+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
73101+ flags &= ~MF_PAX_PAGEEXEC;
73102+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
73103+ flags &= ~MF_PAX_SEGMEXEC;
73104+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
73105+ flags &= ~MF_PAX_RANDMMAP;
73106+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
73107+ flags &= ~MF_PAX_EMUTRAMP;
73108+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
73109+ flags &= ~MF_PAX_MPROTECT;
73110+
73111+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
73112+ flags |= MF_PAX_PAGEEXEC;
73113+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
73114+ flags |= MF_PAX_SEGMEXEC;
73115+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
73116+ flags |= MF_PAX_RANDMMAP;
73117+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
73118+ flags |= MF_PAX_EMUTRAMP;
73119+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
73120+ flags |= MF_PAX_MPROTECT;
73121+
73122+ pax_set_flags(task, flags);
73123+
73124+ return;
73125+}
73126+#endif
73127+
73128+int
73129+gr_handle_proc_ptrace(struct task_struct *task)
73130+{
73131+ struct file *filp;
73132+ struct task_struct *tmp = task;
73133+ struct task_struct *curtemp = current;
73134+ __u32 retmode;
73135+
73136+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
73137+ if (unlikely(!(gr_status & GR_READY)))
73138+ return 0;
73139+#endif
73140+
73141+ read_lock(&tasklist_lock);
73142+ read_lock(&grsec_exec_file_lock);
73143+ filp = task->exec_file;
73144+
73145+ while (task_pid_nr(tmp) > 0) {
73146+ if (tmp == curtemp)
73147+ break;
73148+ tmp = tmp->real_parent;
73149+ }
73150+
73151+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
73152+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
73153+ read_unlock(&grsec_exec_file_lock);
73154+ read_unlock(&tasklist_lock);
73155+ return 1;
73156+ }
73157+
73158+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73159+ if (!(gr_status & GR_READY)) {
73160+ read_unlock(&grsec_exec_file_lock);
73161+ read_unlock(&tasklist_lock);
73162+ return 0;
73163+ }
73164+#endif
73165+
73166+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
73167+ read_unlock(&grsec_exec_file_lock);
73168+ read_unlock(&tasklist_lock);
73169+
73170+ if (retmode & GR_NOPTRACE)
73171+ return 1;
73172+
73173+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
73174+ && (current->acl != task->acl || (current->acl != current->role->root_label
73175+ && task_pid_nr(current) != task_pid_nr(task))))
73176+ return 1;
73177+
73178+ return 0;
73179+}
73180+
73181+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
73182+{
73183+ if (unlikely(!(gr_status & GR_READY)))
73184+ return;
73185+
73186+ if (!(current->role->roletype & GR_ROLE_GOD))
73187+ return;
73188+
73189+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
73190+ p->role->rolename, gr_task_roletype_to_char(p),
73191+ p->acl->filename);
73192+}
73193+
73194+int
73195+gr_handle_ptrace(struct task_struct *task, const long request)
73196+{
73197+ struct task_struct *tmp = task;
73198+ struct task_struct *curtemp = current;
73199+ __u32 retmode;
73200+
73201+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
73202+ if (unlikely(!(gr_status & GR_READY)))
73203+ return 0;
73204+#endif
73205+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
73206+ read_lock(&tasklist_lock);
73207+ while (task_pid_nr(tmp) > 0) {
73208+ if (tmp == curtemp)
73209+ break;
73210+ tmp = tmp->real_parent;
73211+ }
73212+
73213+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
73214+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
73215+ read_unlock(&tasklist_lock);
73216+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73217+ return 1;
73218+ }
73219+ read_unlock(&tasklist_lock);
73220+ }
73221+
73222+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73223+ if (!(gr_status & GR_READY))
73224+ return 0;
73225+#endif
73226+
73227+ read_lock(&grsec_exec_file_lock);
73228+ if (unlikely(!task->exec_file)) {
73229+ read_unlock(&grsec_exec_file_lock);
73230+ return 0;
73231+ }
73232+
73233+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
73234+ read_unlock(&grsec_exec_file_lock);
73235+
73236+ if (retmode & GR_NOPTRACE) {
73237+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73238+ return 1;
73239+ }
73240+
73241+ if (retmode & GR_PTRACERD) {
73242+ switch (request) {
73243+ case PTRACE_SEIZE:
73244+ case PTRACE_POKETEXT:
73245+ case PTRACE_POKEDATA:
73246+ case PTRACE_POKEUSR:
73247+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
73248+ case PTRACE_SETREGS:
73249+ case PTRACE_SETFPREGS:
73250+#endif
73251+#ifdef CONFIG_X86
73252+ case PTRACE_SETFPXREGS:
73253+#endif
73254+#ifdef CONFIG_ALTIVEC
73255+ case PTRACE_SETVRREGS:
73256+#endif
73257+ return 1;
73258+ default:
73259+ return 0;
73260+ }
73261+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
73262+ !(current->role->roletype & GR_ROLE_GOD) &&
73263+ (current->acl != task->acl)) {
73264+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
73265+ return 1;
73266+ }
73267+
73268+ return 0;
73269+}
73270+
73271+static int is_writable_mmap(const struct file *filp)
73272+{
73273+ struct task_struct *task = current;
73274+ struct acl_object_label *obj, *obj2;
73275+
73276+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
73277+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
73278+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
73279+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
73280+ task->role->root_label);
73281+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
73282+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
73283+ return 1;
73284+ }
73285+ }
73286+ return 0;
73287+}
73288+
73289+int
73290+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
73291+{
73292+ __u32 mode;
73293+
73294+ if (unlikely(!file || !(prot & PROT_EXEC)))
73295+ return 1;
73296+
73297+ if (is_writable_mmap(file))
73298+ return 0;
73299+
73300+ mode =
73301+ gr_search_file(file->f_path.dentry,
73302+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73303+ file->f_path.mnt);
73304+
73305+ if (!gr_tpe_allow(file))
73306+ return 0;
73307+
73308+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73309+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73310+ return 0;
73311+ } else if (unlikely(!(mode & GR_EXEC))) {
73312+ return 0;
73313+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73314+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73315+ return 1;
73316+ }
73317+
73318+ return 1;
73319+}
73320+
73321+int
73322+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
73323+{
73324+ __u32 mode;
73325+
73326+ if (unlikely(!file || !(prot & PROT_EXEC)))
73327+ return 1;
73328+
73329+ if (is_writable_mmap(file))
73330+ return 0;
73331+
73332+ mode =
73333+ gr_search_file(file->f_path.dentry,
73334+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
73335+ file->f_path.mnt);
73336+
73337+ if (!gr_tpe_allow(file))
73338+ return 0;
73339+
73340+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
73341+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73342+ return 0;
73343+ } else if (unlikely(!(mode & GR_EXEC))) {
73344+ return 0;
73345+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
73346+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
73347+ return 1;
73348+ }
73349+
73350+ return 1;
73351+}
73352+
73353+void
73354+gr_acl_handle_psacct(struct task_struct *task, const long code)
73355+{
73356+ unsigned long runtime, cputime;
73357+ cputime_t utime, stime;
73358+ unsigned int wday, cday;
73359+ __u8 whr, chr;
73360+ __u8 wmin, cmin;
73361+ __u8 wsec, csec;
73362+ struct timespec curtime, starttime;
73363+
73364+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
73365+ !(task->acl->mode & GR_PROCACCT)))
73366+ return;
73367+
73368+ curtime = ns_to_timespec(ktime_get_ns());
73369+ starttime = ns_to_timespec(task->start_time);
73370+ runtime = curtime.tv_sec - starttime.tv_sec;
73371+ wday = runtime / (60 * 60 * 24);
73372+ runtime -= wday * (60 * 60 * 24);
73373+ whr = runtime / (60 * 60);
73374+ runtime -= whr * (60 * 60);
73375+ wmin = runtime / 60;
73376+ runtime -= wmin * 60;
73377+ wsec = runtime;
73378+
73379+ task_cputime(task, &utime, &stime);
73380+ cputime = cputime_to_secs(utime + stime);
73381+ cday = cputime / (60 * 60 * 24);
73382+ cputime -= cday * (60 * 60 * 24);
73383+ chr = cputime / (60 * 60);
73384+ cputime -= chr * (60 * 60);
73385+ cmin = cputime / 60;
73386+ cputime -= cmin * 60;
73387+ csec = cputime;
73388+
73389+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
73390+
73391+ return;
73392+}
73393+
73394+#ifdef CONFIG_TASKSTATS
73395+int gr_is_taskstats_denied(int pid)
73396+{
73397+ struct task_struct *task;
73398+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73399+ const struct cred *cred;
73400+#endif
73401+ int ret = 0;
73402+
73403+ /* restrict taskstats viewing to un-chrooted root users
73404+ who have the 'view' subject flag if the RBAC system is enabled
73405+ */
73406+
73407+ rcu_read_lock();
73408+ read_lock(&tasklist_lock);
73409+ task = find_task_by_vpid(pid);
73410+ if (task) {
73411+#ifdef CONFIG_GRKERNSEC_CHROOT
73412+ if (proc_is_chrooted(task))
73413+ ret = -EACCES;
73414+#endif
73415+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73416+ cred = __task_cred(task);
73417+#ifdef CONFIG_GRKERNSEC_PROC_USER
73418+ if (gr_is_global_nonroot(cred->uid))
73419+ ret = -EACCES;
73420+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73421+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
73422+ ret = -EACCES;
73423+#endif
73424+#endif
73425+ if (gr_status & GR_READY) {
73426+ if (!(task->acl->mode & GR_VIEW))
73427+ ret = -EACCES;
73428+ }
73429+ } else
73430+ ret = -ENOENT;
73431+
73432+ read_unlock(&tasklist_lock);
73433+ rcu_read_unlock();
73434+
73435+ return ret;
73436+}
73437+#endif
73438+
73439+/* AUXV entries are filled via a descendant of search_binary_handler
73440+ after we've already applied the subject for the target
73441+*/
73442+int gr_acl_enable_at_secure(void)
73443+{
73444+ if (unlikely(!(gr_status & GR_READY)))
73445+ return 0;
73446+
73447+ if (current->acl->mode & GR_ATSECURE)
73448+ return 1;
73449+
73450+ return 0;
73451+}
73452+
73453+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
73454+{
73455+ struct task_struct *task = current;
73456+ struct dentry *dentry = file->f_path.dentry;
73457+ struct vfsmount *mnt = file->f_path.mnt;
73458+ struct acl_object_label *obj, *tmp;
73459+ struct acl_subject_label *subj;
73460+ unsigned int bufsize;
73461+ int is_not_root;
73462+ char *path;
73463+ dev_t dev = __get_dev(dentry);
73464+
73465+ if (unlikely(!(gr_status & GR_READY)))
73466+ return 1;
73467+
73468+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
73469+ return 1;
73470+
73471+ /* ignore Eric Biederman */
73472+ if (IS_PRIVATE(dentry->d_inode))
73473+ return 1;
73474+
73475+ subj = task->acl;
73476+ read_lock(&gr_inode_lock);
73477+ do {
73478+ obj = lookup_acl_obj_label(ino, dev, subj);
73479+ if (obj != NULL) {
73480+ read_unlock(&gr_inode_lock);
73481+ return (obj->mode & GR_FIND) ? 1 : 0;
73482+ }
73483+ } while ((subj = subj->parent_subject));
73484+ read_unlock(&gr_inode_lock);
73485+
 73486+	/* this is purely an optimization, since we're looking up an object
 73487+	   for the directory we're doing a readdir on;
 73488+	   if it's possible for any globbed object to match the entry we're
 73489+	   filling into the directory, then the object we find here will be
 73490+	   an anchor point with the globbed objects attached to it
 73491+	*/
73492+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
73493+ if (obj->globbed == NULL)
73494+ return (obj->mode & GR_FIND) ? 1 : 0;
73495+
73496+ is_not_root = ((obj->filename[0] == '/') &&
73497+ (obj->filename[1] == '\0')) ? 0 : 1;
73498+ bufsize = PAGE_SIZE - namelen - is_not_root;
73499+
73500+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
73501+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
73502+ return 1;
73503+
73504+ preempt_disable();
73505+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
73506+ bufsize);
73507+
73508+ bufsize = strlen(path);
73509+
73510+ /* if base is "/", don't append an additional slash */
73511+ if (is_not_root)
73512+ *(path + bufsize) = '/';
73513+ memcpy(path + bufsize + is_not_root, name, namelen);
73514+ *(path + bufsize + namelen + is_not_root) = '\0';
73515+
73516+ tmp = obj->globbed;
73517+ while (tmp) {
73518+ if (!glob_match(tmp->filename, path)) {
73519+ preempt_enable();
73520+ return (tmp->mode & GR_FIND) ? 1 : 0;
73521+ }
73522+ tmp = tmp->next;
73523+ }
73524+ preempt_enable();
73525+ return (obj->mode & GR_FIND) ? 1 : 0;
73526+}
73527+
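+/*
+ * Illustrative sketch, not from the patch: the single-compare range check
+ * used on bufsize in gr_acl_handle_filldir() above (#if 0, never built).
+ * With unsigned arithmetic, (n - 1) > (limit - 1) is true exactly when
+ * n == 0 (the subtraction wraps) or n > limit, so both bad cases are
+ * caught in one branch.
+ */
+#if 0
+static int toy_in_range(unsigned int n, unsigned int limit)
+{
+	return !((n - 1) > (limit - 1));
+}
+
+/* toy_in_range(0, 4096) == 0, toy_in_range(4096, 4096) == 1,
+ * toy_in_range(5000, 4096) == 0 */
+#endif
+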
73528+void gr_put_exec_file(struct task_struct *task)
73529+{
73530+ struct file *filp;
73531+
73532+ write_lock(&grsec_exec_file_lock);
73533+ filp = task->exec_file;
73534+ task->exec_file = NULL;
73535+ write_unlock(&grsec_exec_file_lock);
73536+
73537+ if (filp)
73538+ fput(filp);
73539+
73540+ return;
73541+}
73542+
73543+
73544+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
73545+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
73546+#endif
73547+#ifdef CONFIG_SECURITY
73548+EXPORT_SYMBOL_GPL(gr_check_user_change);
73549+EXPORT_SYMBOL_GPL(gr_check_group_change);
73550+#endif
73551+
73552diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
73553new file mode 100644
73554index 0000000..9adc75c
73555--- /dev/null
73556+++ b/grsecurity/gracl_alloc.c
73557@@ -0,0 +1,105 @@
73558+#include <linux/kernel.h>
73559+#include <linux/mm.h>
73560+#include <linux/slab.h>
73561+#include <linux/vmalloc.h>
73562+#include <linux/gracl.h>
73563+#include <linux/grsecurity.h>
73564+
73565+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
73566+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
73567+
73568+static int
73569+alloc_pop(void)
73570+{
73571+ if (current_alloc_state->alloc_stack_next == 1)
73572+ return 0;
73573+
73574+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
73575+
73576+ current_alloc_state->alloc_stack_next--;
73577+
73578+ return 1;
73579+}
73580+
73581+static int
73582+alloc_push(void *buf)
73583+{
73584+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
73585+ return 1;
73586+
73587+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
73588+
73589+ current_alloc_state->alloc_stack_next++;
73590+
73591+ return 0;
73592+}
73593+
73594+void *
73595+acl_alloc(unsigned long len)
73596+{
73597+ void *ret = NULL;
73598+
73599+ if (!len || len > PAGE_SIZE)
73600+ goto out;
73601+
73602+ ret = kmalloc(len, GFP_KERNEL);
73603+
73604+ if (ret) {
73605+ if (alloc_push(ret)) {
73606+ kfree(ret);
73607+ ret = NULL;
73608+ }
73609+ }
73610+
73611+out:
73612+ return ret;
73613+}
73614+
73615+void *
73616+acl_alloc_num(unsigned long num, unsigned long len)
73617+{
73618+ if (!len || (num > (PAGE_SIZE / len)))
73619+ return NULL;
73620+
73621+ return acl_alloc(num * len);
73622+}
73623+
73624+void
73625+acl_free_all(void)
73626+{
73627+ if (!current_alloc_state->alloc_stack)
73628+ return;
73629+
73630+ while (alloc_pop()) ;
73631+
73632+ if (current_alloc_state->alloc_stack) {
73633+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
73634+ kfree(current_alloc_state->alloc_stack);
73635+ else
73636+ vfree(current_alloc_state->alloc_stack);
73637+ }
73638+
73639+ current_alloc_state->alloc_stack = NULL;
73640+ current_alloc_state->alloc_stack_size = 1;
73641+ current_alloc_state->alloc_stack_next = 1;
73642+
73643+ return;
73644+}
73645+
73646+int
73647+acl_alloc_stack_init(unsigned long size)
73648+{
73649+ if ((size * sizeof (void *)) <= PAGE_SIZE)
73650+ current_alloc_state->alloc_stack =
73651+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
73652+ else
73653+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
73654+
73655+ current_alloc_state->alloc_stack_size = size;
73656+ current_alloc_state->alloc_stack_next = 1;
73657+
73658+ if (!current_alloc_state->alloc_stack)
73659+ return 0;
73660+ else
73661+ return 1;
73662+}
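gracl_alloc.c records every policy allocation on a pointer stack so that a failed or replaced policy can be torn down with a single pass. A minimal user-space sketch of the same record-then-free-all discipline (the names are illustrative, not the kernel API):

#include <stdlib.h>

static void **stack;
static size_t top, capacity;

static int stack_init(size_t n)
{
	stack = malloc(n * sizeof(*stack));
	capacity = stack ? n : 0;
	top = 0;
	return stack != NULL;
}

/* every allocation is recorded, so one call can release them all;
   this is how a half-loaded policy gets torn down on failure */
static void *tracked_alloc(size_t len)
{
	void *p;

	if (top == capacity)
		return NULL;	/* stack full: caller must abort the load */
	p = malloc(len);
	if (p)
		stack[top++] = p;
	return p;
}

static void free_all(void)
{
	while (top)
		free(stack[--top]);
	free(stack);
	stack = NULL;
	capacity = 0;
}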
73663diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
73664new file mode 100644
73665index 0000000..1a94c11
73666--- /dev/null
73667+++ b/grsecurity/gracl_cap.c
73668@@ -0,0 +1,127 @@
73669+#include <linux/kernel.h>
73670+#include <linux/module.h>
73671+#include <linux/sched.h>
73672+#include <linux/gracl.h>
73673+#include <linux/grsecurity.h>
73674+#include <linux/grinternal.h>
73675+
73676+extern const char *captab_log[];
73677+extern int captab_log_entries;
73678+
73679+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
73680+{
73681+ struct acl_subject_label *curracl;
73682+
73683+ if (!gr_acl_is_enabled())
73684+ return 1;
73685+
73686+ curracl = task->acl;
73687+
73688+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
73689+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73690+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
73691+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
73692+ gr_to_filename(task->exec_file->f_path.dentry,
73693+ task->exec_file->f_path.mnt) : curracl->filename,
73694+ curracl->filename, 0UL,
73695+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
73696+ return 1;
73697+ }
73698+
73699+ return 0;
73700+}
73701+
73702+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73703+{
73704+ struct acl_subject_label *curracl;
73705+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73706+ kernel_cap_t cap_audit = __cap_empty_set;
73707+
73708+ if (!gr_acl_is_enabled())
73709+ return 1;
73710+
73711+ curracl = task->acl;
73712+
73713+ cap_drop = curracl->cap_lower;
73714+ cap_mask = curracl->cap_mask;
73715+ cap_audit = curracl->cap_invert_audit;
73716+
73717+ while ((curracl = curracl->parent_subject)) {
73718+ /* if the cap isn't specified in the current computed mask but is specified in the
73719+ current level subject, and is lowered in the current level subject, then add
73720+		   it to the set of dropped capabilities;
73721+ otherwise, add the current level subject's mask to the current computed mask
73722+ */
73723+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73724+ cap_raise(cap_mask, cap);
73725+ if (cap_raised(curracl->cap_lower, cap))
73726+ cap_raise(cap_drop, cap);
73727+ if (cap_raised(curracl->cap_invert_audit, cap))
73728+ cap_raise(cap_audit, cap);
73729+ }
73730+ }
73731+
73732+ if (!cap_raised(cap_drop, cap)) {
73733+ if (cap_raised(cap_audit, cap))
73734+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
73735+ return 1;
73736+ }
73737+
73738+	/* only learn the capability use if the process has the capability in the
73739+	   general case; the two uses of gr_learn_cap in sys.c are an exception
73740+	   to this rule, to ensure any role transition involves what the full-learned
73741+	   policy believes in a privileged process
73742+	*/
73743+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
73744+ return 1;
73745+
73746+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
73747+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
73748+
73749+ return 0;
73750+}
73751+
73752+int
73753+gr_acl_is_capable(const int cap)
73754+{
73755+ return gr_task_acl_is_capable(current, current_cred(), cap);
73756+}
73757+
73758+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
73759+{
73760+ struct acl_subject_label *curracl;
73761+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73762+
73763+ if (!gr_acl_is_enabled())
73764+ return 1;
73765+
73766+ curracl = task->acl;
73767+
73768+ cap_drop = curracl->cap_lower;
73769+ cap_mask = curracl->cap_mask;
73770+
73771+ while ((curracl = curracl->parent_subject)) {
73772+ /* if the cap isn't specified in the current computed mask but is specified in the
73773+ current level subject, and is lowered in the current level subject, then add
73774+		   it to the set of dropped capabilities;
73775+ otherwise, add the current level subject's mask to the current computed mask
73776+ */
73777+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73778+ cap_raise(cap_mask, cap);
73779+ if (cap_raised(curracl->cap_lower, cap))
73780+ cap_raise(cap_drop, cap);
73781+ }
73782+ }
73783+
73784+ if (!cap_raised(cap_drop, cap))
73785+ return 1;
73786+
73787+ return 0;
73788+}
73789+
73790+int
73791+gr_acl_is_capable_nolog(const int cap)
73792+{
73793+ return gr_task_acl_is_capable_nolog(current, cap);
73794+}
73795+
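The parent_subject walk in the two capability checks above implements nearest-ancestor-wins: cap_mask marks bits that some inner subject has already decided, and cap_lower marks the ones decided as dropped, so outer subjects cannot override them. A sketch of the same accumulation with flat 64-bit masks (a simplification of kernel_cap_t, assumed for brevity):

#include <stdint.h>

struct subject {
	uint64_t cap_mask;	/* capabilities this subject decides */
	uint64_t cap_lower;	/* of those, the ones it drops */
	struct subject *parent;
};

/* once a bit enters 'decided', subjects further up the chain can no
   longer change its verdict -- the innermost subject wins */
static int cap_allowed(const struct subject *s, int cap)
{
	uint64_t bit = 1ULL << cap;
	uint64_t decided = s->cap_mask;
	uint64_t dropped = s->cap_lower;

	for (s = s->parent; s != NULL; s = s->parent) {
		if (!(decided & bit) && (s->cap_mask & bit)) {
			decided |= bit;
			if (s->cap_lower & bit)
				dropped |= bit;
		}
	}
	return !(dropped & bit);
}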
73796diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
73797new file mode 100644
73798index 0000000..a43dd06
73799--- /dev/null
73800+++ b/grsecurity/gracl_compat.c
73801@@ -0,0 +1,269 @@
73802+#include <linux/kernel.h>
73803+#include <linux/gracl.h>
73804+#include <linux/compat.h>
73805+#include <linux/gracl_compat.h>
73806+
73807+#include <asm/uaccess.h>
73808+
73809+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
73810+{
73811+ struct gr_arg_wrapper_compat uwrapcompat;
73812+
73813+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
73814+ return -EFAULT;
73815+
73816+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
73817+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
73818+ return -EINVAL;
73819+
73820+ uwrap->arg = compat_ptr(uwrapcompat.arg);
73821+ uwrap->version = uwrapcompat.version;
73822+ uwrap->size = sizeof(struct gr_arg);
73823+
73824+ return 0;
73825+}
73826+
73827+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
73828+{
73829+ struct gr_arg_compat argcompat;
73830+
73831+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
73832+ return -EFAULT;
73833+
73834+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
73835+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
73836+ arg->role_db.num_roles = argcompat.role_db.num_roles;
73837+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
73838+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
73839+ arg->role_db.num_objects = argcompat.role_db.num_objects;
73840+
73841+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
73842+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
73843+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
73844+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
73845+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
73846+ arg->segv_device = argcompat.segv_device;
73847+ arg->segv_inode = argcompat.segv_inode;
73848+ arg->segv_uid = argcompat.segv_uid;
73849+ arg->num_sprole_pws = argcompat.num_sprole_pws;
73850+ arg->mode = argcompat.mode;
73851+
73852+ return 0;
73853+}
73854+
73855+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
73856+{
73857+ struct acl_object_label_compat objcompat;
73858+
73859+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
73860+ return -EFAULT;
73861+
73862+ obj->filename = compat_ptr(objcompat.filename);
73863+ obj->inode = objcompat.inode;
73864+ obj->device = objcompat.device;
73865+ obj->mode = objcompat.mode;
73866+
73867+ obj->nested = compat_ptr(objcompat.nested);
73868+ obj->globbed = compat_ptr(objcompat.globbed);
73869+
73870+ obj->prev = compat_ptr(objcompat.prev);
73871+ obj->next = compat_ptr(objcompat.next);
73872+
73873+ return 0;
73874+}
73875+
73876+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73877+{
73878+ unsigned int i;
73879+ struct acl_subject_label_compat subjcompat;
73880+
73881+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
73882+ return -EFAULT;
73883+
73884+ subj->filename = compat_ptr(subjcompat.filename);
73885+ subj->inode = subjcompat.inode;
73886+ subj->device = subjcompat.device;
73887+ subj->mode = subjcompat.mode;
73888+ subj->cap_mask = subjcompat.cap_mask;
73889+ subj->cap_lower = subjcompat.cap_lower;
73890+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
73891+
73892+ for (i = 0; i < GR_NLIMITS; i++) {
73893+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
73894+ subj->res[i].rlim_cur = RLIM_INFINITY;
73895+ else
73896+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
73897+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
73898+ subj->res[i].rlim_max = RLIM_INFINITY;
73899+ else
73900+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
73901+ }
73902+ subj->resmask = subjcompat.resmask;
73903+
73904+ subj->user_trans_type = subjcompat.user_trans_type;
73905+ subj->group_trans_type = subjcompat.group_trans_type;
73906+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
73907+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
73908+ subj->user_trans_num = subjcompat.user_trans_num;
73909+ subj->group_trans_num = subjcompat.group_trans_num;
73910+
73911+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
73912+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
73913+ subj->ip_type = subjcompat.ip_type;
73914+ subj->ips = compat_ptr(subjcompat.ips);
73915+ subj->ip_num = subjcompat.ip_num;
73916+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
73917+
73918+ subj->crashes = subjcompat.crashes;
73919+ subj->expires = subjcompat.expires;
73920+
73921+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
73922+ subj->hash = compat_ptr(subjcompat.hash);
73923+ subj->prev = compat_ptr(subjcompat.prev);
73924+ subj->next = compat_ptr(subjcompat.next);
73925+
73926+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
73927+ subj->obj_hash_size = subjcompat.obj_hash_size;
73928+ subj->pax_flags = subjcompat.pax_flags;
73929+
73930+ return 0;
73931+}
73932+
73933+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
73934+{
73935+ struct acl_role_label_compat rolecompat;
73936+
73937+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
73938+ return -EFAULT;
73939+
73940+ role->rolename = compat_ptr(rolecompat.rolename);
73941+ role->uidgid = rolecompat.uidgid;
73942+ role->roletype = rolecompat.roletype;
73943+
73944+ role->auth_attempts = rolecompat.auth_attempts;
73945+ role->expires = rolecompat.expires;
73946+
73947+ role->root_label = compat_ptr(rolecompat.root_label);
73948+ role->hash = compat_ptr(rolecompat.hash);
73949+
73950+ role->prev = compat_ptr(rolecompat.prev);
73951+ role->next = compat_ptr(rolecompat.next);
73952+
73953+ role->transitions = compat_ptr(rolecompat.transitions);
73954+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
73955+ role->domain_children = compat_ptr(rolecompat.domain_children);
73956+ role->domain_child_num = rolecompat.domain_child_num;
73957+
73958+ role->umask = rolecompat.umask;
73959+
73960+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
73961+ role->subj_hash_size = rolecompat.subj_hash_size;
73962+
73963+ return 0;
73964+}
73965+
73966+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73967+{
73968+ struct role_allowed_ip_compat roleip_compat;
73969+
73970+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
73971+ return -EFAULT;
73972+
73973+ roleip->addr = roleip_compat.addr;
73974+ roleip->netmask = roleip_compat.netmask;
73975+
73976+ roleip->prev = compat_ptr(roleip_compat.prev);
73977+ roleip->next = compat_ptr(roleip_compat.next);
73978+
73979+ return 0;
73980+}
73981+
73982+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
73983+{
73984+ struct role_transition_compat trans_compat;
73985+
73986+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
73987+ return -EFAULT;
73988+
73989+ trans->rolename = compat_ptr(trans_compat.rolename);
73990+
73991+ trans->prev = compat_ptr(trans_compat.prev);
73992+ trans->next = compat_ptr(trans_compat.next);
73993+
73994+ return 0;
73996+}
73997+
73998+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73999+{
74000+ struct gr_hash_struct_compat hash_compat;
74001+
74002+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
74003+ return -EFAULT;
74004+
74005+ hash->table = compat_ptr(hash_compat.table);
74006+ hash->nametable = compat_ptr(hash_compat.nametable);
74007+ hash->first = compat_ptr(hash_compat.first);
74008+
74009+ hash->table_size = hash_compat.table_size;
74010+ hash->used_size = hash_compat.used_size;
74011+
74012+ hash->type = hash_compat.type;
74013+
74014+ return 0;
74015+}
74016+
74017+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
74018+{
74019+ compat_uptr_t ptrcompat;
74020+
74021+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
74022+ return -EFAULT;
74023+
74024+ *(void **)ptr = compat_ptr(ptrcompat);
74025+
74026+ return 0;
74027+}
74028+
74029+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74030+{
74031+ struct acl_ip_label_compat ip_compat;
74032+
74033+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
74034+ return -EFAULT;
74035+
74036+ ip->iface = compat_ptr(ip_compat.iface);
74037+ ip->addr = ip_compat.addr;
74038+ ip->netmask = ip_compat.netmask;
74039+ ip->low = ip_compat.low;
74040+ ip->high = ip_compat.high;
74041+ ip->mode = ip_compat.mode;
74042+ ip->type = ip_compat.type;
74043+
74044+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
74045+
74046+ ip->prev = compat_ptr(ip_compat.prev);
74047+ ip->next = compat_ptr(ip_compat.next);
74048+
74049+ return 0;
74050+}
74051+
74052+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74053+{
74054+ struct sprole_pw_compat pw_compat;
74055+
74056+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
74057+ return -EFAULT;
74058+
74059+ pw->rolename = compat_ptr(pw_compat.rolename);
74060+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
74061+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
74062+
74063+ return 0;
74064+}
74065+
74066+size_t get_gr_arg_wrapper_size_compat(void)
74067+{
74068+ return sizeof(struct gr_arg_wrapper_compat);
74069+}
74070+
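Each copy_*_compat helper above reads the 32-bit layout of a structure and widens every embedded pointer with compat_ptr(), turning a 32-bit user value into a native kernel pointer, while scalar fields copy straight across. A freestanding sketch of that widening step (the struct names are hypothetical):

#include <stdint.h>

typedef uint32_t compat_uptr_t;

struct node32 {			/* 32-bit wire layout */
	compat_uptr_t next;
	uint32_t value;
};

struct node {			/* native layout */
	struct node *next;
	uint32_t value;
};

static void *widen_ptr(compat_uptr_t p)
{
	return (void *)(uintptr_t)p;	/* what compat_ptr() boils down to here */
}

static void widen_node(struct node *dst, const struct node32 *src)
{
	dst->next  = widen_ptr(src->next);	/* pointer field widened explicitly */
	dst->value = src->value;		/* scalars copy straight across */
}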
74071diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
74072new file mode 100644
74073index 0000000..8ee8e4f
74074--- /dev/null
74075+++ b/grsecurity/gracl_fs.c
74076@@ -0,0 +1,447 @@
74077+#include <linux/kernel.h>
74078+#include <linux/sched.h>
74079+#include <linux/types.h>
74080+#include <linux/fs.h>
74081+#include <linux/file.h>
74082+#include <linux/stat.h>
74083+#include <linux/grsecurity.h>
74084+#include <linux/grinternal.h>
74085+#include <linux/gracl.h>
74086+
74087+umode_t
74088+gr_acl_umask(void)
74089+{
74090+ if (unlikely(!gr_acl_is_enabled()))
74091+ return 0;
74092+
74093+ return current->role->umask;
74094+}
74095+
74096+__u32
74097+gr_acl_handle_hidden_file(const struct dentry * dentry,
74098+ const struct vfsmount * mnt)
74099+{
74100+ __u32 mode;
74101+
74102+ if (unlikely(d_is_negative(dentry)))
74103+ return GR_FIND;
74104+
74105+ mode =
74106+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
74107+
74108+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
74109+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
74110+ return mode;
74111+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
74112+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
74113+ return 0;
74114+ } else if (unlikely(!(mode & GR_FIND)))
74115+ return 0;
74116+
74117+ return GR_FIND;
74118+}
74119+
74120+__u32
74121+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
74122+ int acc_mode)
74123+{
74124+ __u32 reqmode = GR_FIND;
74125+ __u32 mode;
74126+
74127+ if (unlikely(d_is_negative(dentry)))
74128+ return reqmode;
74129+
74130+ if (acc_mode & MAY_APPEND)
74131+ reqmode |= GR_APPEND;
74132+ else if (acc_mode & MAY_WRITE)
74133+ reqmode |= GR_WRITE;
74134+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
74135+ reqmode |= GR_READ;
74136+
74137+ mode =
74138+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
74139+ mnt);
74140+
74141+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74142+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
74143+ reqmode & GR_READ ? " reading" : "",
74144+ reqmode & GR_WRITE ? " writing" : reqmode &
74145+ GR_APPEND ? " appending" : "");
74146+ return reqmode;
74147+ } else
74148+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74149+ {
74150+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
74151+ reqmode & GR_READ ? " reading" : "",
74152+ reqmode & GR_WRITE ? " writing" : reqmode &
74153+ GR_APPEND ? " appending" : "");
74154+ return 0;
74155+ } else if (unlikely((mode & reqmode) != reqmode))
74156+ return 0;
74157+
74158+ return reqmode;
74159+}
74160+
74161+__u32
74162+gr_acl_handle_creat(const struct dentry * dentry,
74163+ const struct dentry * p_dentry,
74164+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
74165+ const int imode)
74166+{
74167+ __u32 reqmode = GR_WRITE | GR_CREATE;
74168+ __u32 mode;
74169+
74170+ if (acc_mode & MAY_APPEND)
74171+ reqmode |= GR_APPEND;
74172+ // if a directory was required or the directory already exists, then
74173+ // don't count this open as a read
74174+ if ((acc_mode & MAY_READ) &&
74175+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
74176+ reqmode |= GR_READ;
74177+ if ((open_flags & O_CREAT) &&
74178+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74179+ reqmode |= GR_SETID;
74180+
74181+ mode =
74182+ gr_check_create(dentry, p_dentry, p_mnt,
74183+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74184+
74185+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74186+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
74187+ reqmode & GR_READ ? " reading" : "",
74188+ reqmode & GR_WRITE ? " writing" : reqmode &
74189+ GR_APPEND ? " appending" : "");
74190+ return reqmode;
74191+ } else
74192+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74193+ {
74194+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
74195+ reqmode & GR_READ ? " reading" : "",
74196+ reqmode & GR_WRITE ? " writing" : reqmode &
74197+ GR_APPEND ? " appending" : "");
74198+ return 0;
74199+ } else if (unlikely((mode & reqmode) != reqmode))
74200+ return 0;
74201+
74202+ return reqmode;
74203+}
74204+
74205+__u32
74206+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
74207+ const int fmode)
74208+{
74209+ __u32 mode, reqmode = GR_FIND;
74210+
74211+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
74212+ reqmode |= GR_EXEC;
74213+ if (fmode & S_IWOTH)
74214+ reqmode |= GR_WRITE;
74215+ if (fmode & S_IROTH)
74216+ reqmode |= GR_READ;
74217+
74218+ mode =
74219+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
74220+ mnt);
74221+
74222+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
74223+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
74224+ reqmode & GR_READ ? " reading" : "",
74225+ reqmode & GR_WRITE ? " writing" : "",
74226+ reqmode & GR_EXEC ? " executing" : "");
74227+ return reqmode;
74228+ } else
74229+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
74230+ {
74231+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
74232+ reqmode & GR_READ ? " reading" : "",
74233+ reqmode & GR_WRITE ? " writing" : "",
74234+ reqmode & GR_EXEC ? " executing" : "");
74235+ return 0;
74236+ } else if (unlikely((mode & reqmode) != reqmode))
74237+ return 0;
74238+
74239+ return reqmode;
74240+}
74241+
74242+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
74243+{
74244+ __u32 mode;
74245+
74246+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
74247+
74248+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74249+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
74250+ return mode;
74251+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74252+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
74253+ return 0;
74254+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74255+ return 0;
74256+
74257+ return (reqmode);
74258+}
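generic_fs_handler() reduces every hook to one three-way decision: full grant (audited if any audit bit is set), denial logged unless suppression is requested, or silent denial. A compact sketch of that decision with illustrative flag values:

#include <stdio.h>

#define GRANT_AUDIT    0x100	/* "log this grant" */
#define GRANT_SUPPRESS 0x200	/* "don't log this denial" */

/* returns the requested mode when every requested bit was granted,
   0 otherwise -- the same shape as the handlers above */
static unsigned int decide(unsigned int granted, unsigned int reqmode)
{
	if ((granted & reqmode) == reqmode) {
		if (granted & GRANT_AUDIT)
			puts("audit: allowed");
		return reqmode;
	}
	if (!(granted & GRANT_SUPPRESS))
		puts("audit: denied");
	return 0;
}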
74259+
74260+__u32
74261+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
74262+{
74263+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
74264+}
74265+
74266+__u32
74267+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
74268+{
74269+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
74270+}
74271+
74272+__u32
74273+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
74274+{
74275+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
74276+}
74277+
74278+__u32
74279+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
74280+{
74281+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
74282+}
74283+
74284+__u32
74285+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
74286+ umode_t *modeptr)
74287+{
74288+ umode_t mode;
74289+
74290+ *modeptr &= ~gr_acl_umask();
74291+ mode = *modeptr;
74292+
74293+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
74294+ return 1;
74295+
74296+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
74297+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
74298+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
74299+ GR_CHMOD_ACL_MSG);
74300+ } else {
74301+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
74302+ }
74303+}
74304+
74305+__u32
74306+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
74307+{
74308+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
74309+}
74310+
74311+__u32
74312+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
74313+{
74314+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
74315+}
74316+
74317+__u32
74318+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
74319+{
74320+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
74321+}
74322+
74323+__u32
74324+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
74325+{
74326+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
74327+}
74328+
74329+__u32
74330+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
74331+{
74332+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
74333+ GR_UNIXCONNECT_ACL_MSG);
74334+}
74335+
74336+/* hardlinks require at minimum create and link permission;
74337+ any additional privilege required is based on the
74338+ privilege of the file being linked to
74339+*/
74340+__u32
74341+gr_acl_handle_link(const struct dentry * new_dentry,
74342+ const struct dentry * parent_dentry,
74343+ const struct vfsmount * parent_mnt,
74344+ const struct dentry * old_dentry,
74345+ const struct vfsmount * old_mnt, const struct filename *to)
74346+{
74347+ __u32 mode;
74348+ __u32 needmode = GR_CREATE | GR_LINK;
74349+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
74350+
74351+ mode =
74352+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
74353+ old_mnt);
74354+
74355+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
74356+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74357+ return mode;
74358+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74359+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
74360+ return 0;
74361+ } else if (unlikely((mode & needmode) != needmode))
74362+ return 0;
74363+
74364+ return 1;
74365+}
74366+
74367+__u32
74368+gr_acl_handle_symlink(const struct dentry * new_dentry,
74369+ const struct dentry * parent_dentry,
74370+ const struct vfsmount * parent_mnt, const struct filename *from)
74371+{
74372+ __u32 needmode = GR_WRITE | GR_CREATE;
74373+ __u32 mode;
74374+
74375+ mode =
74376+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
74377+ GR_CREATE | GR_AUDIT_CREATE |
74378+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
74379+
74380+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
74381+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74382+ return mode;
74383+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
74384+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
74385+ return 0;
74386+ } else if (unlikely((mode & needmode) != needmode))
74387+ return 0;
74388+
74389+ return (GR_WRITE | GR_CREATE);
74390+}
74391+
74392+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
74393+{
74394+ __u32 mode;
74395+
74396+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
74397+
74398+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
74399+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
74400+ return mode;
74401+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
74402+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
74403+ return 0;
74404+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
74405+ return 0;
74406+
74407+ return (reqmode);
74408+}
74409+
74410+__u32
74411+gr_acl_handle_mknod(const struct dentry * new_dentry,
74412+ const struct dentry * parent_dentry,
74413+ const struct vfsmount * parent_mnt,
74414+ const int mode)
74415+{
74416+ __u32 reqmode = GR_WRITE | GR_CREATE;
74417+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
74418+ reqmode |= GR_SETID;
74419+
74420+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74421+ reqmode, GR_MKNOD_ACL_MSG);
74422+}
74423+
74424+__u32
74425+gr_acl_handle_mkdir(const struct dentry *new_dentry,
74426+ const struct dentry *parent_dentry,
74427+ const struct vfsmount *parent_mnt)
74428+{
74429+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
74430+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
74431+}
74432+
74433+#define RENAME_CHECK_SUCCESS(old, new) \
74434+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
74435+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
74436+
74437+int
74438+gr_acl_handle_rename(struct dentry *new_dentry,
74439+ struct dentry *parent_dentry,
74440+ const struct vfsmount *parent_mnt,
74441+ struct dentry *old_dentry,
74442+ struct inode *old_parent_inode,
74443+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
74444+{
74445+ __u32 comp1, comp2;
74446+ int error = 0;
74447+
74448+ if (unlikely(!gr_acl_is_enabled()))
74449+ return 0;
74450+
74451+ if (flags & RENAME_EXCHANGE) {
74452+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74453+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74454+ GR_SUPPRESS, parent_mnt);
74455+ comp2 =
74456+ gr_search_file(old_dentry,
74457+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74458+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74459+ } else if (d_is_negative(new_dentry)) {
74460+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
74461+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
74462+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
74463+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
74464+ GR_DELETE | GR_AUDIT_DELETE |
74465+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74466+ GR_SUPPRESS, old_mnt);
74467+ } else {
74468+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
74469+ GR_CREATE | GR_DELETE |
74470+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
74471+ GR_AUDIT_READ | GR_AUDIT_WRITE |
74472+ GR_SUPPRESS, parent_mnt);
74473+ comp2 =
74474+ gr_search_file(old_dentry,
74475+ GR_READ | GR_WRITE | GR_AUDIT_READ |
74476+ GR_DELETE | GR_AUDIT_DELETE |
74477+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
74478+ }
74479+
74480+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
74481+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
74482+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74483+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
74484+ && !(comp2 & GR_SUPPRESS)) {
74485+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
74486+ error = -EACCES;
74487+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
74488+ error = -EACCES;
74489+
74490+ return error;
74491+}
74492+
74493+void
74494+gr_acl_handle_exit(void)
74495+{
74496+ u16 id;
74497+ char *rolename;
74498+
74499+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
74500+ !(current->role->roletype & GR_ROLE_PERSIST))) {
74501+ id = current->acl_role_id;
74502+ rolename = current->role->rolename;
74503+ gr_set_acls(1);
74504+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
74505+ }
74506+
74507+ gr_put_exec_file(current);
74508+ return;
74509+}
74510+
74511+int
74512+gr_acl_handle_procpidmem(const struct task_struct *task)
74513+{
74514+ if (unlikely(!gr_acl_is_enabled()))
74515+ return 0;
74516+
74517+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
74518+ !(current->acl->mode & GR_POVERRIDE) &&
74519+ !(current->role->roletype & GR_ROLE_GOD))
74520+ return -EACCES;
74521+
74522+ return 0;
74523+}
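Several handlers above (creat, chmod, mknod) treat a mode as privilege-granting when it sets setuid, or setgid together with group-execute; setgid without group-execute selects mandatory locking rather than privilege, so it is deliberately excluded. A small sketch of that test:

#include <sys/stat.h>

/* mirrors the GR_SETID condition used by the creat/chmod/mknod handlers:
   setuid always counts; setgid counts only alongside group-exec, since
   S_ISGID without S_IXGRP means mandatory locking, not privilege */
static int mode_grants_id(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}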
74524diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
74525new file mode 100644
74526index 0000000..f056b81
74527--- /dev/null
74528+++ b/grsecurity/gracl_ip.c
74529@@ -0,0 +1,386 @@
74530+#include <linux/kernel.h>
74531+#include <asm/uaccess.h>
74532+#include <asm/errno.h>
74533+#include <net/sock.h>
74534+#include <linux/file.h>
74535+#include <linux/fs.h>
74536+#include <linux/net.h>
74537+#include <linux/in.h>
74538+#include <linux/skbuff.h>
74539+#include <linux/ip.h>
74540+#include <linux/udp.h>
74541+#include <linux/types.h>
74542+#include <linux/sched.h>
74543+#include <linux/netdevice.h>
74544+#include <linux/inetdevice.h>
74545+#include <linux/gracl.h>
74546+#include <linux/grsecurity.h>
74547+#include <linux/grinternal.h>
74548+
74549+#define GR_BIND 0x01
74550+#define GR_CONNECT 0x02
74551+#define GR_INVERT 0x04
74552+#define GR_BINDOVERRIDE 0x08
74553+#define GR_CONNECTOVERRIDE 0x10
74554+#define GR_SOCK_FAMILY 0x20
74555+
74556+static const char * gr_protocols[IPPROTO_MAX] = {
74557+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
74558+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
74559+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
74560+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
74561+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
74562+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
74563+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
74564+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
74565+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
74566+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
74567+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
74568+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
74569+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
74570+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
74571+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
74572+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
74573+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
74574+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
74575+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
74576+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
74577+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
74578+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
74579+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
74580+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
74581+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
74582+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
74583+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
74584+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
74585+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
74586+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
74587+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
74588+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
74589+ };
74590+
74591+static const char * gr_socktypes[SOCK_MAX] = {
74592+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
74593+ "unknown:7", "unknown:8", "unknown:9", "packet"
74594+ };
74595+
74596+static const char * gr_sockfamilies[AF_MAX+1] = {
74597+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
74598+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
74599+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
74600+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
74601+ };
74602+
74603+const char *
74604+gr_proto_to_name(unsigned char proto)
74605+{
74606+ return gr_protocols[proto];
74607+}
74608+
74609+const char *
74610+gr_socktype_to_name(unsigned char type)
74611+{
74612+ return gr_socktypes[type];
74613+}
74614+
74615+const char *
74616+gr_sockfamily_to_name(unsigned char family)
74617+{
74618+ return gr_sockfamilies[family];
74619+}
74620+
74621+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
74622+
74623+int
74624+gr_search_socket(const int domain, const int type, const int protocol)
74625+{
74626+ struct acl_subject_label *curr;
74627+ const struct cred *cred = current_cred();
74628+
74629+ if (unlikely(!gr_acl_is_enabled()))
74630+ goto exit;
74631+
74632+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
74633+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
74634+ goto exit; // let the kernel handle it
74635+
74636+ curr = current->acl;
74637+
74638+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
74639+ /* the family is allowed, if this is PF_INET allow it only if
74640+ the extra sock type/protocol checks pass */
74641+ if (domain == PF_INET)
74642+ goto inet_check;
74643+ goto exit;
74644+ } else {
74645+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74646+ __u32 fakeip = 0;
74647+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74648+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74649+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74650+ gr_to_filename(current->exec_file->f_path.dentry,
74651+ current->exec_file->f_path.mnt) :
74652+ curr->filename, curr->filename,
74653+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
74654+ &current->signal->saved_ip);
74655+ goto exit;
74656+ }
74657+ goto exit_fail;
74658+ }
74659+
74660+inet_check:
74661+ /* the rest of this checking is for IPv4 only */
74662+ if (!curr->ips)
74663+ goto exit;
74664+
74665+ if ((curr->ip_type & (1U << type)) &&
74666+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
74667+ goto exit;
74668+
74669+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74670+		/* we don't place ACLs on raw sockets, and sometimes
74671+ dgram/ip sockets are opened for ioctl and not
74672+ bind/connect, so we'll fake a bind learn log */
74673+ if (type == SOCK_RAW || type == SOCK_PACKET) {
74674+ __u32 fakeip = 0;
74675+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74676+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74677+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74678+ gr_to_filename(current->exec_file->f_path.dentry,
74679+ current->exec_file->f_path.mnt) :
74680+ curr->filename, curr->filename,
74681+ &fakeip, 0, type,
74682+ protocol, GR_CONNECT, &current->signal->saved_ip);
74683+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
74684+ __u32 fakeip = 0;
74685+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74686+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74687+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74688+ gr_to_filename(current->exec_file->f_path.dentry,
74689+ current->exec_file->f_path.mnt) :
74690+ curr->filename, curr->filename,
74691+ &fakeip, 0, type,
74692+ protocol, GR_BIND, &current->signal->saved_ip);
74693+ }
74694+ /* we'll log when they use connect or bind */
74695+ goto exit;
74696+ }
74697+
74698+exit_fail:
74699+ if (domain == PF_INET)
74700+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
74701+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
74702+ else if (rcu_access_pointer(net_families[domain]) != NULL)
74703+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
74704+ gr_socktype_to_name(type), protocol);
74705+
74706+ return 0;
74707+exit:
74708+ return 1;
74709+}
74710+
74711+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
74712+{
74713+ if ((ip->mode & mode) &&
74714+ (ip_port >= ip->low) &&
74715+ (ip_port <= ip->high) &&
74716+ ((ntohl(ip_addr) & our_netmask) ==
74717+ (ntohl(our_addr) & our_netmask))
74718+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
74719+ && (ip->type & (1U << type))) {
74720+ if (ip->mode & GR_INVERT)
74721+ return 2; // specifically denied
74722+ else
74723+ return 1; // allowed
74724+ }
74725+
74726+ return 0; // not specifically allowed, may continue parsing
74727+}
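check_ip_policy() above yields three outcomes: 1 allows, 2 is an explicit deny via GR_INVERT, and 0 means no match, so the caller keeps scanning the remaining rules. A user-space sketch of the core test, assuming host-byte-order inputs for clarity (the kernel code converts with ntohl()):

#include <stdint.h>

enum verdict { NO_MATCH = 0, ALLOWED = 1, DENIED = 2 };

/* one rule, three outcomes: 0 keeps scanning the rule list,
   1 allows, 2 is an explicit deny (the GR_INVERT case) */
static enum verdict check_rule(uint32_t addr, uint16_t port,
			       uint32_t rule_addr, uint32_t rule_mask,
			       uint16_t lo, uint16_t hi, int invert)
{
	if (port < lo || port > hi)
		return NO_MATCH;
	if ((addr & rule_mask) != (rule_addr & rule_mask))
		return NO_MATCH;
	return invert ? DENIED : ALLOWED;
}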
74728+
74729+static int
74730+gr_search_connectbind(const int full_mode, struct sock *sk,
74731+ struct sockaddr_in *addr, const int type)
74732+{
74733+ char iface[IFNAMSIZ] = {0};
74734+ struct acl_subject_label *curr;
74735+ struct acl_ip_label *ip;
74736+ struct inet_sock *isk;
74737+ struct net_device *dev;
74738+ struct in_device *idev;
74739+ unsigned long i;
74740+ int ret;
74741+ int mode = full_mode & (GR_BIND | GR_CONNECT);
74742+ __u32 ip_addr = 0;
74743+ __u32 our_addr;
74744+ __u32 our_netmask;
74745+ char *p;
74746+ __u16 ip_port = 0;
74747+ const struct cred *cred = current_cred();
74748+
74749+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
74750+ return 0;
74751+
74752+ curr = current->acl;
74753+ isk = inet_sk(sk);
74754+
74755+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
74756+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
74757+ addr->sin_addr.s_addr = curr->inaddr_any_override;
74758+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
74759+ struct sockaddr_in saddr;
74760+ int err;
74761+
74762+ saddr.sin_family = AF_INET;
74763+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
74764+ saddr.sin_port = isk->inet_sport;
74765+
74766+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74767+ if (err)
74768+ return err;
74769+
74770+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74771+ if (err)
74772+ return err;
74773+ }
74774+
74775+ if (!curr->ips)
74776+ return 0;
74777+
74778+ ip_addr = addr->sin_addr.s_addr;
74779+ ip_port = ntohs(addr->sin_port);
74780+
74781+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74782+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74783+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74784+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74785+ gr_to_filename(current->exec_file->f_path.dentry,
74786+ current->exec_file->f_path.mnt) :
74787+ curr->filename, curr->filename,
74788+ &ip_addr, ip_port, type,
74789+ sk->sk_protocol, mode, &current->signal->saved_ip);
74790+ return 0;
74791+ }
74792+
74793+ for (i = 0; i < curr->ip_num; i++) {
74794+ ip = *(curr->ips + i);
74795+ if (ip->iface != NULL) {
74796+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
74797+ p = strchr(iface, ':');
74798+ if (p != NULL)
74799+ *p = '\0';
74800+ dev = dev_get_by_name(sock_net(sk), iface);
74801+ if (dev == NULL)
74802+ continue;
74803+ idev = in_dev_get(dev);
74804+ if (idev == NULL) {
74805+ dev_put(dev);
74806+ continue;
74807+ }
74808+ rcu_read_lock();
74809+ for_ifa(idev) {
74810+ if (!strcmp(ip->iface, ifa->ifa_label)) {
74811+ our_addr = ifa->ifa_address;
74812+ our_netmask = 0xffffffff;
74813+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74814+ if (ret == 1) {
74815+ rcu_read_unlock();
74816+ in_dev_put(idev);
74817+ dev_put(dev);
74818+ return 0;
74819+ } else if (ret == 2) {
74820+ rcu_read_unlock();
74821+ in_dev_put(idev);
74822+ dev_put(dev);
74823+ goto denied;
74824+ }
74825+ }
74826+ } endfor_ifa(idev);
74827+ rcu_read_unlock();
74828+ in_dev_put(idev);
74829+ dev_put(dev);
74830+ } else {
74831+ our_addr = ip->addr;
74832+ our_netmask = ip->netmask;
74833+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74834+ if (ret == 1)
74835+ return 0;
74836+ else if (ret == 2)
74837+ goto denied;
74838+ }
74839+ }
74840+
74841+denied:
74842+ if (mode == GR_BIND)
74843+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74844+ else if (mode == GR_CONNECT)
74845+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74846+
74847+ return -EACCES;
74848+}
74849+
74850+int
74851+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
74852+{
74853+ /* always allow disconnection of dgram sockets with connect */
74854+ if (addr->sin_family == AF_UNSPEC)
74855+ return 0;
74856+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
74857+}
74858+
74859+int
74860+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
74861+{
74862+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
74863+}
74864+
74865+int gr_search_listen(struct socket *sock)
74866+{
74867+ struct sock *sk = sock->sk;
74868+ struct sockaddr_in addr;
74869+
74870+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74871+ addr.sin_port = inet_sk(sk)->inet_sport;
74872+
74873+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74874+}
74875+
74876+int gr_search_accept(struct socket *sock)
74877+{
74878+ struct sock *sk = sock->sk;
74879+ struct sockaddr_in addr;
74880+
74881+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74882+ addr.sin_port = inet_sk(sk)->inet_sport;
74883+
74884+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74885+}
74886+
74887+int
74888+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
74889+{
74890+ if (addr)
74891+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
74892+ else {
74893+ struct sockaddr_in sin;
74894+ const struct inet_sock *inet = inet_sk(sk);
74895+
74896+ sin.sin_addr.s_addr = inet->inet_daddr;
74897+ sin.sin_port = inet->inet_dport;
74898+
74899+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74900+ }
74901+}
74902+
74903+int
74904+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
74905+{
74906+ struct sockaddr_in sin;
74907+
74908+ if (unlikely(skb->len < sizeof (struct udphdr)))
74909+ return 0; // skip this packet
74910+
74911+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
74912+ sin.sin_port = udp_hdr(skb)->source;
74913+
74914+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74915+}
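Both the socket-family and protocol checks above index a fixed-size bitmap with value / 32 and test bit value % 32. The same helpers in isolation:

#include <stdint.h>

#define NBITS 256
static uint32_t bitmap[NBITS / 32];

static void bit_set(unsigned int v)
{
	bitmap[v / 32] |= 1U << (v % 32);
}

static int bit_test(unsigned int v)
{
	return bitmap[v / 32] & (1U << (v % 32));
}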
74916diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
74917new file mode 100644
74918index 0000000..25f54ef
74919--- /dev/null
74920+++ b/grsecurity/gracl_learn.c
74921@@ -0,0 +1,207 @@
74922+#include <linux/kernel.h>
74923+#include <linux/mm.h>
74924+#include <linux/sched.h>
74925+#include <linux/poll.h>
74926+#include <linux/string.h>
74927+#include <linux/file.h>
74928+#include <linux/types.h>
74929+#include <linux/vmalloc.h>
74930+#include <linux/grinternal.h>
74931+
74932+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
74933+ size_t count, loff_t *ppos);
74934+extern int gr_acl_is_enabled(void);
74935+
74936+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
74937+static int gr_learn_attached;
74938+
74939+/* use a 512k buffer */
74940+#define LEARN_BUFFER_SIZE (512 * 1024)
74941+
74942+static DEFINE_SPINLOCK(gr_learn_lock);
74943+static DEFINE_MUTEX(gr_learn_user_mutex);
74944+
74945+/* we need to maintain two buffers, so that the kernel context of grlearn
74946+   can hold a mutex (which may sleep) around the copy to userspace, while the
74947+   other kernel contexts hold a spinlock when copying into the buffer, since they cannot sleep
74948+*/
74949+static char *learn_buffer;
74950+static char *learn_buffer_user;
74951+static int learn_buffer_len;
74952+static int learn_buffer_user_len;
74953+
74954+static ssize_t
74955+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
74956+{
74957+ DECLARE_WAITQUEUE(wait, current);
74958+ ssize_t retval = 0;
74959+
74960+ add_wait_queue(&learn_wait, &wait);
74961+ set_current_state(TASK_INTERRUPTIBLE);
74962+ do {
74963+ mutex_lock(&gr_learn_user_mutex);
74964+ spin_lock(&gr_learn_lock);
74965+ if (learn_buffer_len)
74966+ break;
74967+ spin_unlock(&gr_learn_lock);
74968+ mutex_unlock(&gr_learn_user_mutex);
74969+ if (file->f_flags & O_NONBLOCK) {
74970+ retval = -EAGAIN;
74971+ goto out;
74972+ }
74973+ if (signal_pending(current)) {
74974+ retval = -ERESTARTSYS;
74975+ goto out;
74976+ }
74977+
74978+ schedule();
74979+ } while (1);
74980+
74981+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
74982+ learn_buffer_user_len = learn_buffer_len;
74983+ retval = learn_buffer_len;
74984+ learn_buffer_len = 0;
74985+
74986+ spin_unlock(&gr_learn_lock);
74987+
74988+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
74989+ retval = -EFAULT;
74990+
74991+ mutex_unlock(&gr_learn_user_mutex);
74992+out:
74993+ set_current_state(TASK_RUNNING);
74994+ remove_wait_queue(&learn_wait, &wait);
74995+ return retval;
74996+}
74997+
74998+static unsigned int
74999+poll_learn(struct file * file, poll_table * wait)
75000+{
75001+ poll_wait(file, &learn_wait, wait);
75002+
75003+ if (learn_buffer_len)
75004+ return (POLLIN | POLLRDNORM);
75005+
75006+ return 0;
75007+}
75008+
75009+void
75010+gr_clear_learn_entries(void)
75011+{
75012+ char *tmp;
75013+
75014+ mutex_lock(&gr_learn_user_mutex);
75015+ spin_lock(&gr_learn_lock);
75016+ tmp = learn_buffer;
75017+ learn_buffer = NULL;
75018+ spin_unlock(&gr_learn_lock);
75019+ if (tmp)
75020+ vfree(tmp);
75021+ if (learn_buffer_user != NULL) {
75022+ vfree(learn_buffer_user);
75023+ learn_buffer_user = NULL;
75024+ }
75025+ learn_buffer_len = 0;
75026+ mutex_unlock(&gr_learn_user_mutex);
75027+
75028+ return;
75029+}
75030+
75031+void
75032+gr_add_learn_entry(const char *fmt, ...)
75033+{
75034+ va_list args;
75035+ unsigned int len;
75036+
75037+ if (!gr_learn_attached)
75038+ return;
75039+
75040+ spin_lock(&gr_learn_lock);
75041+
75042+ /* leave a gap at the end so we know when it's "full" but don't have to
75043+ compute the exact length of the string we're trying to append
75044+ */
75045+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
75046+ spin_unlock(&gr_learn_lock);
75047+ wake_up_interruptible(&learn_wait);
75048+ return;
75049+ }
75050+ if (learn_buffer == NULL) {
75051+ spin_unlock(&gr_learn_lock);
75052+ return;
75053+ }
75054+
75055+ va_start(args, fmt);
75056+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
75057+ va_end(args);
75058+
75059+ learn_buffer_len += len + 1;
75060+
75061+ spin_unlock(&gr_learn_lock);
75062+ wake_up_interruptible(&learn_wait);
75063+
75064+ return;
75065+}
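gr_add_learn_entry() avoids measuring the formatted string before appending by refusing any append once the buffer is within 16KB of the end; a single entry is assumed never to exceed that reserved gap. A sketch of the technique with a pthread mutex standing in for the kernel spinlock (an adaptation for user space):

#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>

#define BUF_SIZE (512 * 1024)
#define GAP      16384		/* no single entry may exceed this */

static char buf[BUF_SIZE];
static size_t len;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void add_entry(const char *fmt, ...)
{
	va_list ap;

	pthread_mutex_lock(&lock);
	if (len <= BUF_SIZE - GAP) {	/* enough slack: append without measuring */
		va_start(ap, fmt);
		len += vsnprintf(buf + len, BUF_SIZE - len, fmt, ap) + 1;
		va_end(ap);
	}
	pthread_mutex_unlock(&lock);
}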
75066+
75067+static int
75068+open_learn(struct inode *inode, struct file *file)
75069+{
75070+ if (file->f_mode & FMODE_READ && gr_learn_attached)
75071+ return -EBUSY;
75072+ if (file->f_mode & FMODE_READ) {
75073+ int retval = 0;
75074+ mutex_lock(&gr_learn_user_mutex);
75075+ if (learn_buffer == NULL)
75076+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
75077+ if (learn_buffer_user == NULL)
75078+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
75079+ if (learn_buffer == NULL) {
75080+ retval = -ENOMEM;
75081+ goto out_error;
75082+ }
75083+ if (learn_buffer_user == NULL) {
75084+ retval = -ENOMEM;
75085+ goto out_error;
75086+ }
75087+ learn_buffer_len = 0;
75088+ learn_buffer_user_len = 0;
75089+ gr_learn_attached = 1;
75090+out_error:
75091+ mutex_unlock(&gr_learn_user_mutex);
75092+ return retval;
75093+ }
75094+ return 0;
75095+}
75096+
75097+static int
75098+close_learn(struct inode *inode, struct file *file)
75099+{
75100+ if (file->f_mode & FMODE_READ) {
75101+ char *tmp = NULL;
75102+ mutex_lock(&gr_learn_user_mutex);
75103+ spin_lock(&gr_learn_lock);
75104+ tmp = learn_buffer;
75105+ learn_buffer = NULL;
75106+ spin_unlock(&gr_learn_lock);
75107+ if (tmp)
75108+ vfree(tmp);
75109+ if (learn_buffer_user != NULL) {
75110+ vfree(learn_buffer_user);
75111+ learn_buffer_user = NULL;
75112+ }
75113+ learn_buffer_len = 0;
75114+ learn_buffer_user_len = 0;
75115+ gr_learn_attached = 0;
75116+ mutex_unlock(&gr_learn_user_mutex);
75117+ }
75118+
75119+ return 0;
75120+}
75121+
75122+const struct file_operations grsec_fops = {
75123+ .read = read_learn,
75124+ .write = write_grsec_handler,
75125+ .open = open_learn,
75126+ .release = close_learn,
75127+ .poll = poll_learn,
75128+};
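read_learn() copies the shared buffer into a second, consumer-private buffer while holding the spinlock, so the slow, sleeping copy to userspace can then run with only the mutex held and producers are blocked only for a memcpy. A condensed pthreads sketch of that handoff (illustrative, not the kernel locking API):

#include <pthread.h>
#include <string.h>

#define BUF_SIZE (512 * 1024)

static char shared_buf[BUF_SIZE], private_buf[BUF_SIZE];
static size_t shared_len;
static pthread_spinlock_t s_lock;	/* producers: must not block */
static pthread_mutex_t c_lock = PTHREAD_MUTEX_INITIALIZER;	/* single consumer */

static void buffers_init(void)
{
	pthread_spin_init(&s_lock, PTHREAD_PROCESS_PRIVATE);
}

/* fast copy into a consumer-private buffer under the spinlock; the slow
   consumption step (the kernel's copy_to_user) then runs with only the
   mutex held */
static size_t drain(void)
{
	size_t n;

	pthread_mutex_lock(&c_lock);
	pthread_spin_lock(&s_lock);
	n = shared_len;
	memcpy(private_buf, shared_buf, n);
	shared_len = 0;
	pthread_spin_unlock(&s_lock);
	/* ... consume private_buf[0..n) here, possibly sleeping ... */
	pthread_mutex_unlock(&c_lock);
	return n;
}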
75129diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
75130new file mode 100644
75131index 0000000..fd26052
75132--- /dev/null
75133+++ b/grsecurity/gracl_policy.c
75134@@ -0,0 +1,1781 @@
75135+#include <linux/kernel.h>
75136+#include <linux/module.h>
75137+#include <linux/sched.h>
75138+#include <linux/mm.h>
75139+#include <linux/file.h>
75140+#include <linux/fs.h>
75141+#include <linux/namei.h>
75142+#include <linux/mount.h>
75143+#include <linux/tty.h>
75144+#include <linux/proc_fs.h>
75145+#include <linux/lglock.h>
75146+#include <linux/slab.h>
75147+#include <linux/vmalloc.h>
75148+#include <linux/types.h>
75149+#include <linux/sysctl.h>
75150+#include <linux/netdevice.h>
75151+#include <linux/ptrace.h>
75152+#include <linux/gracl.h>
75153+#include <linux/gralloc.h>
75154+#include <linux/security.h>
75155+#include <linux/grinternal.h>
75156+#include <linux/pid_namespace.h>
75157+#include <linux/stop_machine.h>
75158+#include <linux/fdtable.h>
75159+#include <linux/percpu.h>
75160+#include <linux/lglock.h>
75161+#include <linux/hugetlb.h>
75162+#include <linux/posix-timers.h>
75163+#include "../fs/mount.h"
75164+
75165+#include <asm/uaccess.h>
75166+#include <asm/errno.h>
75167+#include <asm/mman.h>
75168+
75169+extern struct gr_policy_state *polstate;
75170+
75171+#define FOR_EACH_ROLE_START(role) \
75172+ role = polstate->role_list; \
75173+ while (role) {
75174+
75175+#define FOR_EACH_ROLE_END(role) \
75176+ role = role->prev; \
75177+ }
75178+
75179+struct path gr_real_root;
75180+
75181+extern struct gr_alloc_state *current_alloc_state;
75182+
75183+u16 acl_sp_role_value;
75184+
75185+static DEFINE_MUTEX(gr_dev_mutex);
75186+
75187+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
75188+extern void gr_clear_learn_entries(void);
75189+
75190+struct gr_arg *gr_usermode __read_only;
75191+unsigned char *gr_system_salt __read_only;
75192+unsigned char *gr_system_sum __read_only;
75193+
75194+static unsigned int gr_auth_attempts = 0;
75195+static unsigned long gr_auth_expires = 0UL;
75196+
75197+struct acl_object_label *fakefs_obj_rw;
75198+struct acl_object_label *fakefs_obj_rwx;
75199+
75200+extern int gr_init_uidset(void);
75201+extern void gr_free_uidset(void);
75202+extern void gr_remove_uid(uid_t uid);
75203+extern int gr_find_uid(uid_t uid);
75204+
75205+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
75206+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
75207+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
75208+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
75209+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
75210+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
75211+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
75212+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
75213+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
75214+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
75215+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
75216+extern void assign_special_role(const char *rolename);
75217+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
75218+extern int gr_rbac_disable(void *unused);
75219+extern void gr_enable_rbac_system(void);
75220+
75221+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
75222+{
75223+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
75224+ return -EFAULT;
75225+
75226+ return 0;
75227+}
75228+
75229+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
75230+{
75231+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
75232+ return -EFAULT;
75233+
75234+ return 0;
75235+}
75236+
75237+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
75238+{
75239+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
75240+ return -EFAULT;
75241+
75242+ return 0;
75243+}
75244+
75245+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
75246+{
75247+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
75248+ return -EFAULT;
75249+
75250+ return 0;
75251+}
75252+
75253+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
75254+{
75255+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
75256+ return -EFAULT;
75257+
75258+ return 0;
75259+}
75260+
75261+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
75262+{
75263+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
75264+ return -EFAULT;
75265+
75266+ return 0;
75267+}
75268+
75269+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
75270+{
75271+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
75272+ return -EFAULT;
75273+
75274+ return 0;
75275+}
75276+
75277+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
75278+{
75279+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
75280+ return -EFAULT;
75281+
75282+ return 0;
75283+}
75284+
75285+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
75286+{
75287+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
75288+ return -EFAULT;
75289+
75290+ return 0;
75291+}
75292+
75293+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
75294+{
75295+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
75296+ return -EFAULT;
75297+
75298+ if ((uwrap->version != GRSECURITY_VERSION) ||
75299+ (uwrap->size != sizeof(struct gr_arg)))
75300+ return -EINVAL;
75301+
75302+ return 0;
75303+}
75304+
75305+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
75306+{
75307+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
75308+ return -EFAULT;
75309+
75310+ return 0;
75311+}
75312+
75313+static size_t get_gr_arg_wrapper_size_normal(void)
75314+{
75315+ return sizeof(struct gr_arg_wrapper);
75316+}
75317+
75318+#ifdef CONFIG_COMPAT
75319+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
75320+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
75321+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
75322+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
75323+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
75324+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
75325+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
75326+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
75327+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
75328+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
75329+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
75330+extern size_t get_gr_arg_wrapper_size_compat(void);
75331+
75332+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
75333+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
75334+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
75335+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
75336+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
75337+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
75338+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
75339+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
75340+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
75341+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
75342+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
75343+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
75344+
75345+#else
75346+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
75347+#define copy_gr_arg copy_gr_arg_normal
75348+#define copy_gr_hash_struct copy_gr_hash_struct_normal
75349+#define copy_acl_object_label copy_acl_object_label_normal
75350+#define copy_acl_subject_label copy_acl_subject_label_normal
75351+#define copy_acl_role_label copy_acl_role_label_normal
75352+#define copy_acl_ip_label copy_acl_ip_label_normal
75353+#define copy_pointer_from_array copy_pointer_from_array_normal
75354+#define copy_sprole_pw copy_sprole_pw_normal
75355+#define copy_role_transition copy_role_transition_normal
75356+#define copy_role_allowed_ip copy_role_allowed_ip_normal
75357+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
75358+#endif
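+
+/* With CONFIG_COMPAT the copy hooks above are repointed between the
+ * _normal and _compat implementations on each write by
+ * write_grsec_handler(), based on is_compat_task(); without COMPAT the
+ * macros bind the _normal versions at compile time and the indirection
+ * disappears entirely.
+ */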
75359+
75360+static struct acl_subject_label *
75361+lookup_subject_map(const struct acl_subject_label *userp)
75362+{
75363+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
75364+ struct subject_map *match;
75365+
75366+ match = polstate->subj_map_set.s_hash[index];
75367+
75368+ while (match && match->user != userp)
75369+ match = match->next;
75370+
75371+ if (match != NULL)
75372+ return match->kernel;
75373+ else
75374+ return NULL;
75375+}
75376+
75377+static void
75378+insert_subj_map_entry(struct subject_map *subjmap)
75379+{
75380+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
75381+ struct subject_map **curr;
75382+
75383+ subjmap->prev = NULL;
75384+
75385+ curr = &polstate->subj_map_set.s_hash[index];
75386+ if (*curr != NULL)
75387+ (*curr)->prev = subjmap;
75388+
75389+ subjmap->next = *curr;
75390+ *curr = subjmap;
75391+
75392+ return;
75393+}
75394+
75395+static void
75396+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75397+{
75398+ unsigned int index =
75399+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
75400+ struct acl_role_label **curr;
75401+ struct acl_role_label *tmp, *tmp2;
75402+
75403+ curr = &polstate->acl_role_set.r_hash[index];
75404+
75405+ /* simple case, slot is empty, just set it to our role */
75406+ if (*curr == NULL) {
75407+ *curr = role;
75408+ } else {
75409+ /* example:
75410+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
75411+ 2 -> 3
75412+ */
75413+ /* first check to see if we can already be reached via this slot */
75414+ tmp = *curr;
75415+ while (tmp && tmp != role)
75416+ tmp = tmp->next;
75417+ if (tmp == role) {
75418+ /* we don't need to add ourselves to this slot's chain */
75419+ return;
75420+ }
75421+ /* we need to add ourselves to this chain, two cases */
75422+ if (role->next == NULL) {
75423+ /* simple case, append the current chain to our role */
75424+ role->next = *curr;
75425+ *curr = role;
75426+ } else {
75427+ /* 1 -> 2 -> 3 -> 4
75428+ 2 -> 3 -> 4
75429+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
75430+ */
75431+ /* trickier case: walk our role's chain until we find
75432+ the role for the start of the current slot's chain */
75433+ tmp = role;
75434+ tmp2 = *curr;
75435+ while (tmp->next && tmp->next != tmp2)
75436+ tmp = tmp->next;
75437+ if (tmp->next == tmp2) {
75438+ /* from example above, we found 3, so just
75439+ replace this slot's chain with ours */
75440+ *curr = role;
75441+ } else {
75442+ /* we didn't find a subset of our role's chain
75443+ in the current slot's chain, so append their
75444+ chain to ours, and set us as the first role in
75445+ the slot's chain
75446+
75447+ we could fold this case with the case above,
75448+ but making it explicit for clarity
75449+ */
75450+ tmp->next = tmp2;
75451+ *curr = role;
75452+ }
75453+ }
75454+ }
75455+
75456+ return;
75457+}
75458+
75459+static void
75460+insert_acl_role_label(struct acl_role_label *role)
75461+{
75462+ int i;
75463+
75464+ if (polstate->role_list == NULL) {
75465+ polstate->role_list = role;
75466+ role->prev = NULL;
75467+ } else {
75468+ role->prev = polstate->role_list;
75469+ polstate->role_list = role;
75470+ }
75471+
75472+ /* used for hash chains */
75473+ role->next = NULL;
75474+
75475+ if (role->roletype & GR_ROLE_DOMAIN) {
75476+ for (i = 0; i < role->domain_child_num; i++)
75477+ __insert_acl_role_label(role, role->domain_children[i]);
75478+ } else
75479+ __insert_acl_role_label(role, role->uidgid);
75480+}
75481+
75482+static int
75483+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
75484+{
75485+ struct name_entry **curr, *nentry;
75486+ struct inodev_entry *ientry;
75487+ unsigned int len = strlen(name);
75488+ unsigned int key = full_name_hash(name, len);
75489+ unsigned int index = key % polstate->name_set.n_size;
75490+
75491+ curr = &polstate->name_set.n_hash[index];
75492+
75493+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75494+ curr = &((*curr)->next);
75495+
75496+ if (*curr != NULL)
75497+ return 1;
75498+
75499+ nentry = acl_alloc(sizeof (struct name_entry));
75500+ if (nentry == NULL)
75501+ return 0;
75502+ ientry = acl_alloc(sizeof (struct inodev_entry));
75503+ if (ientry == NULL)
75504+ return 0;
75505+ ientry->nentry = nentry;
75506+
75507+ nentry->key = key;
75508+ nentry->name = name;
75509+ nentry->inode = inode;
75510+ nentry->device = device;
75511+ nentry->len = len;
75512+ nentry->deleted = deleted;
75513+
75514+ nentry->prev = NULL;
75515+ curr = &polstate->name_set.n_hash[index];
75516+ if (*curr != NULL)
75517+ (*curr)->prev = nentry;
75518+ nentry->next = *curr;
75519+ *curr = nentry;
75520+
75521+ /* insert us into the table searchable by inode/dev */
75522+ __insert_inodev_entry(polstate, ientry);
75523+
75524+ return 1;
75525+}
75526+
75527+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75528+
75529+static void *
75530+create_table(__u32 * len, int elementsize)
75531+{
75532+ unsigned int table_sizes[] = {
75533+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75534+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75535+ 4194301, 8388593, 16777213, 33554393, 67108859
75536+ };
75537+ void *newtable = NULL;
75538+ unsigned int pwr = 0;
75539+
75540+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75541+ table_sizes[pwr] <= *len)
75542+ pwr++;
75543+
75544+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75545+ return newtable;
75546+
75547+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75548+ newtable =
75549+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75550+ else
75551+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75552+
75553+ *len = table_sizes[pwr];
75554+
75555+ return newtable;
75556+}
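+
+/* Worked example: for *len = 1000 the loop stops at 1021, the first
+ * prime greater than 1000, so the chained table gets 1021 buckets and
+ * the expected load factor is 1000/1021, i.e. lambda just under 1, as
+ * the comment above intends. */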
75557+
75558+static int
75559+init_variables(const struct gr_arg *arg, bool reload)
75560+{
75561+ struct task_struct *reaper = init_pid_ns.child_reaper;
75562+ unsigned int stacksize;
75563+
75564+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
75565+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75566+ polstate->name_set.n_size = arg->role_db.num_objects;
75567+ polstate->inodev_set.i_size = arg->role_db.num_objects;
75568+
75569+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
75570+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
75571+ return 1;
75572+
75573+ if (!reload) {
75574+ if (!gr_init_uidset())
75575+ return 1;
75576+ }
75577+
75578+ /* set up the stack that holds allocation info */
75579+
75580+ stacksize = arg->role_db.num_pointers + 5;
75581+
75582+ if (!acl_alloc_stack_init(stacksize))
75583+ return 1;
75584+
75585+ if (!reload) {
75586+ /* grab reference for the real root dentry and vfsmount */
75587+ get_fs_root(reaper->fs, &gr_real_root);
75588+
75589+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75590+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
75591+#endif
75592+
75593+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75594+ if (fakefs_obj_rw == NULL)
75595+ return 1;
75596+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75597+
75598+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
75599+ if (fakefs_obj_rwx == NULL)
75600+ return 1;
75601+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75602+ }
75603+
75604+ polstate->subj_map_set.s_hash =
75605+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
75606+ polstate->acl_role_set.r_hash =
75607+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
75608+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
75609+ polstate->inodev_set.i_hash =
75610+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
75611+
75612+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
75613+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
75614+ return 1;
75615+
75616+ memset(polstate->subj_map_set.s_hash, 0,
75617+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
75618+ memset(polstate->acl_role_set.r_hash, 0,
75619+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
75620+ memset(polstate->name_set.n_hash, 0,
75621+ sizeof (struct name_entry *) * polstate->name_set.n_size);
75622+ memset(polstate->inodev_set.i_hash, 0,
75623+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
75624+
75625+ return 0;
75626+}
75627+
75628+/* free information not needed after startup:
75629+   currently just the user->kernel pointer mappings for subjects
75630+*/
75631+
75632+static void
75633+free_init_variables(void)
75634+{
75635+ __u32 i;
75636+
75637+ if (polstate->subj_map_set.s_hash) {
75638+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
75639+ if (polstate->subj_map_set.s_hash[i]) {
75640+ kfree(polstate->subj_map_set.s_hash[i]);
75641+ polstate->subj_map_set.s_hash[i] = NULL;
75642+ }
75643+ }
75644+
75645+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
75646+ PAGE_SIZE)
75647+ kfree(polstate->subj_map_set.s_hash);
75648+ else
75649+ vfree(polstate->subj_map_set.s_hash);
75650+ }
75651+
75652+ return;
75653+}
75654+
75655+static void
75656+free_variables(bool reload)
75657+{
75658+ struct acl_subject_label *s;
75659+ struct acl_role_label *r;
75660+ struct task_struct *task, *task2;
75661+ unsigned int x;
75662+
75663+ if (!reload) {
75664+ gr_clear_learn_entries();
75665+
75666+ read_lock(&tasklist_lock);
75667+ do_each_thread(task2, task) {
75668+ task->acl_sp_role = 0;
75669+ task->acl_role_id = 0;
75670+ task->inherited = 0;
75671+ task->acl = NULL;
75672+ task->role = NULL;
75673+ } while_each_thread(task2, task);
75674+ read_unlock(&tasklist_lock);
75675+
75676+ kfree(fakefs_obj_rw);
75677+ fakefs_obj_rw = NULL;
75678+ kfree(fakefs_obj_rwx);
75679+ fakefs_obj_rwx = NULL;
75680+
75681+ /* release the reference to the real root dentry and vfsmount */
75682+ path_put(&gr_real_root);
75683+ memset(&gr_real_root, 0, sizeof(gr_real_root));
75684+ }
75685+
75686+ /* free all object hash tables */
75687+
75688+ FOR_EACH_ROLE_START(r)
75689+ if (r->subj_hash == NULL)
75690+ goto next_role;
75691+ FOR_EACH_SUBJECT_START(r, s, x)
75692+ if (s->obj_hash == NULL)
75693+ break;
75694+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75695+ kfree(s->obj_hash);
75696+ else
75697+ vfree(s->obj_hash);
75698+ FOR_EACH_SUBJECT_END(s, x)
75699+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75700+ if (s->obj_hash == NULL)
75701+ break;
75702+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75703+ kfree(s->obj_hash);
75704+ else
75705+ vfree(s->obj_hash);
75706+ FOR_EACH_NESTED_SUBJECT_END(s)
75707+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75708+ kfree(r->subj_hash);
75709+ else
75710+ vfree(r->subj_hash);
75711+ r->subj_hash = NULL;
75712+next_role:
75713+ FOR_EACH_ROLE_END(r)
75714+
75715+ acl_free_all();
75716+
75717+ if (polstate->acl_role_set.r_hash) {
75718+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75719+ PAGE_SIZE)
75720+ kfree(polstate->acl_role_set.r_hash);
75721+ else
75722+ vfree(polstate->acl_role_set.r_hash);
75723+ }
75724+ if (polstate->name_set.n_hash) {
75725+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
75726+ PAGE_SIZE)
75727+ kfree(polstate->name_set.n_hash);
75728+ else
75729+ vfree(polstate->name_set.n_hash);
75730+ }
75731+
75732+ if (polstate->inodev_set.i_hash) {
75733+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75734+ PAGE_SIZE)
75735+ kfree(polstate->inodev_set.i_hash);
75736+ else
75737+ vfree(polstate->inodev_set.i_hash);
75738+ }
75739+
75740+ if (!reload)
75741+ gr_free_uidset();
75742+
75743+ memset(&polstate->name_set, 0, sizeof (struct name_db));
75744+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
75745+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
75746+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
75747+
75748+ polstate->default_role = NULL;
75749+ polstate->kernel_role = NULL;
75750+ polstate->role_list = NULL;
75751+
75752+ return;
75753+}
75754+
75755+static struct acl_subject_label *
75756+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
75757+
75758+static int alloc_and_copy_string(char **name, unsigned int maxlen)
75759+{
75760+ unsigned int len = strnlen_user(*name, maxlen);
75761+ char *tmp;
75762+
75763+ if (!len || len >= maxlen)
75764+ return -EINVAL;
75765+
75766+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75767+ return -ENOMEM;
75768+
75769+ if (copy_from_user(tmp, *name, len))
75770+ return -EFAULT;
75771+
75772+	tmp[len-1] = '\0';	/* len counts the NUL; force termination in case userspace raced */
75773+ *name = tmp;
75774+
75775+ return 0;
75776+}
75777+
75778+static int
75779+copy_user_glob(struct acl_object_label *obj)
75780+{
75781+ struct acl_object_label *g_tmp, **guser;
75782+ int error;
75783+
75784+ if (obj->globbed == NULL)
75785+ return 0;
75786+
75787+ guser = &obj->globbed;
75788+ while (*guser) {
75789+ g_tmp = (struct acl_object_label *)
75790+ acl_alloc(sizeof (struct acl_object_label));
75791+ if (g_tmp == NULL)
75792+ return -ENOMEM;
75793+
75794+ if (copy_acl_object_label(g_tmp, *guser))
75795+ return -EFAULT;
75796+
75797+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
75798+ if (error)
75799+ return error;
75800+
75801+ *guser = g_tmp;
75802+ guser = &(g_tmp->next);
75803+ }
75804+
75805+ return 0;
75806+}
75807+
75808+static int
75809+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75810+ struct acl_role_label *role)
75811+{
75812+ struct acl_object_label *o_tmp;
75813+ int ret;
75814+
75815+ while (userp) {
75816+ if ((o_tmp = (struct acl_object_label *)
75817+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75818+ return -ENOMEM;
75819+
75820+ if (copy_acl_object_label(o_tmp, userp))
75821+ return -EFAULT;
75822+
75823+ userp = o_tmp->prev;
75824+
75825+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
75826+ if (ret)
75827+ return ret;
75828+
75829+ insert_acl_obj_label(o_tmp, subj);
75830+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75831+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75832+ return -ENOMEM;
75833+
75834+ ret = copy_user_glob(o_tmp);
75835+ if (ret)
75836+ return ret;
75837+
75838+ if (o_tmp->nested) {
75839+ int already_copied;
75840+
75841+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
75842+ if (IS_ERR(o_tmp->nested))
75843+ return PTR_ERR(o_tmp->nested);
75844+
75845+ /* insert into nested subject list if we haven't copied this one yet
75846+ to prevent duplicate entries */
75847+ if (!already_copied) {
75848+ o_tmp->nested->next = role->hash->first;
75849+ role->hash->first = o_tmp->nested;
75850+ }
75851+ }
75852+ }
75853+
75854+ return 0;
75855+}
75856+
75857+static __u32
75858+count_user_subjs(struct acl_subject_label *userp)
75859+{
75860+ struct acl_subject_label s_tmp;
75861+ __u32 num = 0;
75862+
75863+ while (userp) {
75864+ if (copy_acl_subject_label(&s_tmp, userp))
75865+ break;
75866+		num++;
75867+ userp = s_tmp.prev;
75868+ }
75869+
75870+ return num;
75871+}
75872+
75873+static int
75874+copy_user_allowedips(struct acl_role_label *rolep)
75875+{
75876+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
75877+
75878+ ruserip = rolep->allowed_ips;
75879+
75880+ while (ruserip) {
75881+ rlast = rtmp;
75882+
75883+ if ((rtmp = (struct role_allowed_ip *)
75884+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
75885+ return -ENOMEM;
75886+
75887+ if (copy_role_allowed_ip(rtmp, ruserip))
75888+ return -EFAULT;
75889+
75890+ ruserip = rtmp->prev;
75891+
75892+ if (!rlast) {
75893+ rtmp->prev = NULL;
75894+ rolep->allowed_ips = rtmp;
75895+ } else {
75896+ rlast->next = rtmp;
75897+ rtmp->prev = rlast;
75898+ }
75899+
75900+ if (!ruserip)
75901+ rtmp->next = NULL;
75902+ }
75903+
75904+ return 0;
75905+}
75906+
75907+static int
75908+copy_user_transitions(struct acl_role_label *rolep)
75909+{
75910+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
75911+ int error;
75912+
75913+ rusertp = rolep->transitions;
75914+
75915+ while (rusertp) {
75916+ rlast = rtmp;
75917+
75918+ if ((rtmp = (struct role_transition *)
75919+ acl_alloc(sizeof (struct role_transition))) == NULL)
75920+ return -ENOMEM;
75921+
75922+ if (copy_role_transition(rtmp, rusertp))
75923+ return -EFAULT;
75924+
75925+ rusertp = rtmp->prev;
75926+
75927+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
75928+ if (error)
75929+ return error;
75930+
75931+ if (!rlast) {
75932+ rtmp->prev = NULL;
75933+ rolep->transitions = rtmp;
75934+ } else {
75935+ rlast->next = rtmp;
75936+ rtmp->prev = rlast;
75937+ }
75938+
75939+ if (!rusertp)
75940+ rtmp->next = NULL;
75941+ }
75942+
75943+ return 0;
75944+}
75945+
75946+static __u32 count_user_objs(const struct acl_object_label __user *userp)
75947+{
75948+ struct acl_object_label o_tmp;
75949+ __u32 num = 0;
75950+
75951+ while (userp) {
75952+ if (copy_acl_object_label(&o_tmp, userp))
75953+ break;
75954+
75955+ userp = o_tmp.prev;
75956+ num++;
75957+ }
75958+
75959+ return num;
75960+}
75961+
75962+static struct acl_subject_label *
75963+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
75964+{
75965+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
75966+ __u32 num_objs;
75967+ struct acl_ip_label **i_tmp, *i_utmp2;
75968+ struct gr_hash_struct ghash;
75969+ struct subject_map *subjmap;
75970+ unsigned int i_num;
75971+ int err;
75972+
75973+ if (already_copied != NULL)
75974+ *already_copied = 0;
75975+
75976+ s_tmp = lookup_subject_map(userp);
75977+
75978+ /* we've already copied this subject into the kernel, just return
75979+ the reference to it, and don't copy it over again
75980+ */
75981+ if (s_tmp) {
75982+ if (already_copied != NULL)
75983+ *already_copied = 1;
75984+		return s_tmp;
75985+ }
75986+
75987+ if ((s_tmp = (struct acl_subject_label *)
75988+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
75989+ return ERR_PTR(-ENOMEM);
75990+
75991+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
75992+ if (subjmap == NULL)
75993+ return ERR_PTR(-ENOMEM);
75994+
75995+ subjmap->user = userp;
75996+ subjmap->kernel = s_tmp;
75997+ insert_subj_map_entry(subjmap);
75998+
75999+ if (copy_acl_subject_label(s_tmp, userp))
76000+ return ERR_PTR(-EFAULT);
76001+
76002+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
76003+ if (err)
76004+ return ERR_PTR(err);
76005+
76006+ if (!strcmp(s_tmp->filename, "/"))
76007+ role->root_label = s_tmp;
76008+
76009+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
76010+ return ERR_PTR(-EFAULT);
76011+
76012+ /* copy user and group transition tables */
76013+
76014+ if (s_tmp->user_trans_num) {
76015+ uid_t *uidlist;
76016+
76017+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
76018+ if (uidlist == NULL)
76019+ return ERR_PTR(-ENOMEM);
76020+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
76021+ return ERR_PTR(-EFAULT);
76022+
76023+ s_tmp->user_transitions = uidlist;
76024+ }
76025+
76026+ if (s_tmp->group_trans_num) {
76027+ gid_t *gidlist;
76028+
76029+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
76030+ if (gidlist == NULL)
76031+ return ERR_PTR(-ENOMEM);
76032+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
76033+ return ERR_PTR(-EFAULT);
76034+
76035+ s_tmp->group_transitions = gidlist;
76036+ }
76037+
76038+ /* set up object hash table */
76039+ num_objs = count_user_objs(ghash.first);
76040+
76041+ s_tmp->obj_hash_size = num_objs;
76042+ s_tmp->obj_hash =
76043+ (struct acl_object_label **)
76044+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
76045+
76046+ if (!s_tmp->obj_hash)
76047+ return ERR_PTR(-ENOMEM);
76048+
76049+ memset(s_tmp->obj_hash, 0,
76050+ s_tmp->obj_hash_size *
76051+ sizeof (struct acl_object_label *));
76052+
76053+ /* add in objects */
76054+ err = copy_user_objs(ghash.first, s_tmp, role);
76055+
76056+ if (err)
76057+ return ERR_PTR(err);
76058+
76059+ /* set pointer for parent subject */
76060+ if (s_tmp->parent_subject) {
76061+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
76062+
76063+ if (IS_ERR(s_tmp2))
76064+ return s_tmp2;
76065+
76066+ s_tmp->parent_subject = s_tmp2;
76067+ }
76068+
76069+ /* add in ip acls */
76070+
76071+ if (!s_tmp->ip_num) {
76072+ s_tmp->ips = NULL;
76073+ goto insert;
76074+ }
76075+
76076+ i_tmp =
76077+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
76078+ sizeof (struct acl_ip_label *));
76079+
76080+ if (!i_tmp)
76081+ return ERR_PTR(-ENOMEM);
76082+
76083+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
76084+ *(i_tmp + i_num) =
76085+ (struct acl_ip_label *)
76086+ acl_alloc(sizeof (struct acl_ip_label));
76087+ if (!*(i_tmp + i_num))
76088+ return ERR_PTR(-ENOMEM);
76089+
76090+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
76091+ return ERR_PTR(-EFAULT);
76092+
76093+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
76094+ return ERR_PTR(-EFAULT);
76095+
76096+ if ((*(i_tmp + i_num))->iface == NULL)
76097+ continue;
76098+
76099+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
76100+ if (err)
76101+ return ERR_PTR(err);
76102+ }
76103+
76104+ s_tmp->ips = i_tmp;
76105+
76106+insert:
76107+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
76108+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
76109+ return ERR_PTR(-ENOMEM);
76110+
76111+ return s_tmp;
76112+}
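+
+/* Note the ordering in do_copy_user_subj(): the user->kernel mapping is
+ * registered via insert_subj_map_entry() before the subject body is
+ * copied in, so a parent_subject chain that loops back on itself is
+ * resolved by lookup_subject_map() instead of recursing without bound.
+ */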
76113+
76114+static int
76115+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
76116+{
76117+ struct acl_subject_label s_pre;
76118+ struct acl_subject_label * ret;
76119+ int err;
76120+
76121+ while (userp) {
76122+ if (copy_acl_subject_label(&s_pre, userp))
76123+ return -EFAULT;
76124+
76125+ ret = do_copy_user_subj(userp, role, NULL);
76126+
76127+ err = PTR_ERR(ret);
76128+ if (IS_ERR(ret))
76129+ return err;
76130+
76131+ insert_acl_subj_label(ret, role);
76132+
76133+ userp = s_pre.prev;
76134+ }
76135+
76136+ return 0;
76137+}
76138+
76139+static int
76140+copy_user_acl(struct gr_arg *arg)
76141+{
76142+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
76143+ struct acl_subject_label *subj_list;
76144+ struct sprole_pw *sptmp;
76145+ struct gr_hash_struct *ghash;
76146+ uid_t *domainlist;
76147+ unsigned int r_num;
76148+ int err = 0;
76149+ __u16 i;
76150+ __u32 num_subjs;
76151+
76152+ /* we need a default and kernel role */
76153+ if (arg->role_db.num_roles < 2)
76154+ return -EINVAL;
76155+
76156+ /* copy special role authentication info from userspace */
76157+
76158+ polstate->num_sprole_pws = arg->num_sprole_pws;
76159+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
76160+
76161+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
76162+ return -ENOMEM;
76163+
76164+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76165+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
76166+ if (!sptmp)
76167+ return -ENOMEM;
76168+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
76169+ return -EFAULT;
76170+
76171+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
76172+ if (err)
76173+ return err;
76174+
76175+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76176+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
76177+#endif
76178+
76179+ polstate->acl_special_roles[i] = sptmp;
76180+ }
76181+
76182+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
76183+
76184+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
76185+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
76186+
76187+ if (!r_tmp)
76188+ return -ENOMEM;
76189+
76190+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
76191+ return -EFAULT;
76192+
76193+ if (copy_acl_role_label(r_tmp, r_utmp2))
76194+ return -EFAULT;
76195+
76196+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
76197+ if (err)
76198+ return err;
76199+
76200+ if (!strcmp(r_tmp->rolename, "default")
76201+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
76202+ polstate->default_role = r_tmp;
76203+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
76204+ polstate->kernel_role = r_tmp;
76205+ }
76206+
76207+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
76208+ return -ENOMEM;
76209+
76210+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
76211+ return -EFAULT;
76212+
76213+ r_tmp->hash = ghash;
76214+
76215+ num_subjs = count_user_subjs(r_tmp->hash->first);
76216+
76217+ r_tmp->subj_hash_size = num_subjs;
76218+ r_tmp->subj_hash =
76219+ (struct acl_subject_label **)
76220+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
76221+
76222+ if (!r_tmp->subj_hash)
76223+ return -ENOMEM;
76224+
76225+ err = copy_user_allowedips(r_tmp);
76226+ if (err)
76227+ return err;
76228+
76229+ /* copy domain info */
76230+ if (r_tmp->domain_children != NULL) {
76231+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
76232+ if (domainlist == NULL)
76233+ return -ENOMEM;
76234+
76235+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
76236+ return -EFAULT;
76237+
76238+ r_tmp->domain_children = domainlist;
76239+ }
76240+
76241+ err = copy_user_transitions(r_tmp);
76242+ if (err)
76243+ return err;
76244+
76245+ memset(r_tmp->subj_hash, 0,
76246+ r_tmp->subj_hash_size *
76247+ sizeof (struct acl_subject_label *));
76248+
76249+ /* acquire the list of subjects, then NULL out
76250+ the list prior to parsing the subjects for this role,
76251+ as during this parsing the list is replaced with a list
76252+ of *nested* subjects for the role
76253+ */
76254+ subj_list = r_tmp->hash->first;
76255+
76256+ /* set nested subject list to null */
76257+ r_tmp->hash->first = NULL;
76258+
76259+ err = copy_user_subjs(subj_list, r_tmp);
76260+
76261+ if (err)
76262+ return err;
76263+
76264+ insert_acl_role_label(r_tmp);
76265+ }
76266+
76267+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
76268+ return -EINVAL;
76269+
76270+ return err;
76271+}
76272+
76273+static int gracl_reload_apply_policies(void *reload)
76274+{
76275+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
76276+ struct task_struct *task, *task2;
76277+ struct acl_role_label *role, *rtmp;
76278+ struct acl_subject_label *subj;
76279+ const struct cred *cred;
76280+ int role_applied;
76281+ int ret = 0;
76282+
76283+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
76284+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
76285+
76286+ /* first make sure we'll be able to apply the new policy cleanly */
76287+ do_each_thread(task2, task) {
76288+ if (task->exec_file == NULL)
76289+ continue;
76290+ role_applied = 0;
76291+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76292+ /* preserve special roles */
76293+ FOR_EACH_ROLE_START(role)
76294+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76295+ rtmp = task->role;
76296+ task->role = role;
76297+ role_applied = 1;
76298+ break;
76299+ }
76300+ FOR_EACH_ROLE_END(role)
76301+ }
76302+ if (!role_applied) {
76303+ cred = __task_cred(task);
76304+ rtmp = task->role;
76305+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76306+ }
76307+		/* this handles non-nested inherited subjects; nested subjects will still
76308+ be dropped currently */
76309+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
76310+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
76311+ /* change the role back so that we've made no modifications to the policy */
76312+ task->role = rtmp;
76313+
76314+ if (subj == NULL || task->tmpacl == NULL) {
76315+ ret = -EINVAL;
76316+ goto out;
76317+ }
76318+ } while_each_thread(task2, task);
76319+
76320+ /* now actually apply the policy */
76321+
76322+ do_each_thread(task2, task) {
76323+ if (task->exec_file) {
76324+ role_applied = 0;
76325+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
76326+ /* preserve special roles */
76327+ FOR_EACH_ROLE_START(role)
76328+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
76329+ task->role = role;
76330+ role_applied = 1;
76331+ break;
76332+ }
76333+ FOR_EACH_ROLE_END(role)
76334+ }
76335+ if (!role_applied) {
76336+ cred = __task_cred(task);
76337+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76338+ }
76339+			/* this handles non-nested inherited subjects; nested subjects will still
76340+ be dropped currently */
76341+ if (!reload_state->oldmode && task->inherited)
76342+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
76343+ else {
76344+ /* looked up and tagged to the task previously */
76345+ subj = task->tmpacl;
76346+ }
76347+ /* subj will be non-null */
76348+ __gr_apply_subject_to_task(polstate, task, subj);
76349+ if (reload_state->oldmode) {
76350+ task->acl_role_id = 0;
76351+ task->acl_sp_role = 0;
76352+ task->inherited = 0;
76353+ }
76354+ } else {
76355+ // it's a kernel process
76356+ task->role = polstate->kernel_role;
76357+ task->acl = polstate->kernel_role->root_label;
76358+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76359+ task->acl->mode &= ~GR_PROCFIND;
76360+#endif
76361+ }
76362+ } while_each_thread(task2, task);
76363+
76364+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
76365+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
76366+
76367+out:
76368+
76369+ return ret;
76370+}
76371+
76372+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
76373+{
76374+ struct gr_reload_state new_reload_state = { };
76375+ int err;
76376+
76377+ new_reload_state.oldpolicy_ptr = polstate;
76378+ new_reload_state.oldalloc_ptr = current_alloc_state;
76379+ new_reload_state.oldmode = oldmode;
76380+
76381+ current_alloc_state = &new_reload_state.newalloc;
76382+ polstate = &new_reload_state.newpolicy;
76383+
76384+ /* everything relevant is now saved off, copy in the new policy */
76385+ if (init_variables(args, true)) {
76386+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76387+ err = -ENOMEM;
76388+ goto error;
76389+ }
76390+
76391+ err = copy_user_acl(args);
76392+ free_init_variables();
76393+ if (err)
76394+ goto error;
76395+ /* the new policy is copied in, with the old policy available via saved_state
76396+ first go through applying roles, making sure to preserve special roles
76397+ then apply new subjects, making sure to preserve inherited and nested subjects,
76398+ though currently only inherited subjects will be preserved
76399+ */
76400+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
76401+ if (err)
76402+ goto error;
76403+
76404+ /* we've now applied the new policy, so restore the old policy state to free it */
76405+ polstate = &new_reload_state.oldpolicy;
76406+ current_alloc_state = &new_reload_state.oldalloc;
76407+ free_variables(true);
76408+
76409+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
76410+ to running_polstate/current_alloc_state inside stop_machine
76411+ */
76412+ err = 0;
76413+ goto out;
76414+error:
76415+ /* on error of loading the new policy, we'll just keep the previous
76416+ policy set around
76417+ */
76418+ free_variables(true);
76419+
76420+ /* doesn't affect runtime, but maintains consistent state */
76421+out:
76422+ polstate = new_reload_state.oldpolicy_ptr;
76423+ current_alloc_state = new_reload_state.oldalloc_ptr;
76424+
76425+ return err;
76426+}
76427+
76428+static int
76429+gracl_init(struct gr_arg *args)
76430+{
76431+ int error = 0;
76432+
76433+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76434+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76435+
76436+ if (init_variables(args, false)) {
76437+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76438+ error = -ENOMEM;
76439+ goto out;
76440+ }
76441+
76442+ error = copy_user_acl(args);
76443+ free_init_variables();
76444+ if (error)
76445+ goto out;
76446+
76447+ error = gr_set_acls(0);
76448+ if (error)
76449+ goto out;
76450+
76451+ gr_enable_rbac_system();
76452+
76453+ return 0;
76454+
76455+out:
76456+ free_variables(false);
76457+ return error;
76458+}
76459+
76460+static int
76461+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
76462+ unsigned char **sum)
76463+{
76464+ struct acl_role_label *r;
76465+ struct role_allowed_ip *ipp;
76466+ struct role_transition *trans;
76467+ unsigned int i;
76468+ int found = 0;
76469+ u32 curr_ip = current->signal->curr_ip;
76470+
76471+ current->signal->saved_ip = curr_ip;
76472+
76473+ /* check transition table */
76474+
76475+ for (trans = current->role->transitions; trans; trans = trans->next) {
76476+ if (!strcmp(rolename, trans->rolename)) {
76477+ found = 1;
76478+ break;
76479+ }
76480+ }
76481+
76482+ if (!found)
76483+ return 0;
76484+
76485+ /* handle special roles that do not require authentication
76486+ and check ip */
76487+
76488+ FOR_EACH_ROLE_START(r)
76489+ if (!strcmp(rolename, r->rolename) &&
76490+ (r->roletype & GR_ROLE_SPECIAL)) {
76491+ found = 0;
76492+ if (r->allowed_ips != NULL) {
76493+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
76494+ if ((ntohl(curr_ip) & ipp->netmask) ==
76495+ (ntohl(ipp->addr) & ipp->netmask))
76496+ found = 1;
76497+ }
76498+ } else
76499+ found = 2;
76500+ if (!found)
76501+ return 0;
76502+
76503+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
76504+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
76505+ *salt = NULL;
76506+ *sum = NULL;
76507+ return 1;
76508+ }
76509+ }
76510+ FOR_EACH_ROLE_END(r)
76511+
76512+ for (i = 0; i < polstate->num_sprole_pws; i++) {
76513+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
76514+ *salt = polstate->acl_special_roles[i]->salt;
76515+ *sum = polstate->acl_special_roles[i]->sum;
76516+ return 1;
76517+ }
76518+ }
76519+
76520+ return 0;
76521+}
76522+
76523+int gr_check_secure_terminal(struct task_struct *task)
76524+{
76525+ struct task_struct *p, *p2, *p3;
76526+ struct files_struct *files;
76527+ struct fdtable *fdt;
76528+ struct file *our_file = NULL, *file;
76529+ int i;
76530+
76531+ if (task->signal->tty == NULL)
76532+ return 1;
76533+
76534+ files = get_files_struct(task);
76535+ if (files != NULL) {
76536+ rcu_read_lock();
76537+ fdt = files_fdtable(files);
76538+ for (i=0; i < fdt->max_fds; i++) {
76539+ file = fcheck_files(files, i);
76540+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
76541+ get_file(file);
76542+ our_file = file;
76543+ }
76544+ }
76545+ rcu_read_unlock();
76546+ put_files_struct(files);
76547+ }
76548+
76549+ if (our_file == NULL)
76550+ return 1;
76551+
76552+ read_lock(&tasklist_lock);
76553+ do_each_thread(p2, p) {
76554+ files = get_files_struct(p);
76555+ if (files == NULL ||
76556+ (p->signal && p->signal->tty == task->signal->tty)) {
76557+ if (files != NULL)
76558+ put_files_struct(files);
76559+ continue;
76560+ }
76561+ rcu_read_lock();
76562+ fdt = files_fdtable(files);
76563+ for (i=0; i < fdt->max_fds; i++) {
76564+ file = fcheck_files(files, i);
76565+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
76566+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
76567+ p3 = task;
76568+ while (task_pid_nr(p3) > 0) {
76569+ if (p3 == p)
76570+ break;
76571+ p3 = p3->real_parent;
76572+ }
76573+ if (p3 == p)
76574+ break;
76575+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
76576+ gr_handle_alertkill(p);
76577+ rcu_read_unlock();
76578+ put_files_struct(files);
76579+ read_unlock(&tasklist_lock);
76580+ fput(our_file);
76581+ return 0;
76582+ }
76583+ }
76584+ rcu_read_unlock();
76585+ put_files_struct(files);
76586+ } while_each_thread(p2, p);
76587+ read_unlock(&tasklist_lock);
76588+
76589+ fput(our_file);
76590+ return 1;
76591+}
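+
+/* The scan above looks for any task outside our ancestor chain holding
+ * an fd to the character device backing our controlling tty; a match
+ * means the terminal could be sniffed, which is why GR_STATUS below
+ * answers 3 instead of 1 in that case.
+ */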
76592+
76593+ssize_t
76594+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
76595+{
76596+ struct gr_arg_wrapper uwrap;
76597+ unsigned char *sprole_salt = NULL;
76598+ unsigned char *sprole_sum = NULL;
76599+ int error = 0;
76600+ int error2 = 0;
76601+ size_t req_count = 0;
76602+ unsigned char oldmode = 0;
76603+
76604+ mutex_lock(&gr_dev_mutex);
76605+
76606+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
76607+ error = -EPERM;
76608+ goto out;
76609+ }
76610+
76611+#ifdef CONFIG_COMPAT
76612+ pax_open_kernel();
76613+ if (is_compat_task()) {
76614+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
76615+ copy_gr_arg = &copy_gr_arg_compat;
76616+ copy_acl_object_label = &copy_acl_object_label_compat;
76617+ copy_acl_subject_label = &copy_acl_subject_label_compat;
76618+ copy_acl_role_label = &copy_acl_role_label_compat;
76619+ copy_acl_ip_label = &copy_acl_ip_label_compat;
76620+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
76621+ copy_role_transition = &copy_role_transition_compat;
76622+ copy_sprole_pw = &copy_sprole_pw_compat;
76623+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
76624+ copy_pointer_from_array = &copy_pointer_from_array_compat;
76625+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
76626+ } else {
76627+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
76628+ copy_gr_arg = &copy_gr_arg_normal;
76629+ copy_acl_object_label = &copy_acl_object_label_normal;
76630+ copy_acl_subject_label = &copy_acl_subject_label_normal;
76631+ copy_acl_role_label = &copy_acl_role_label_normal;
76632+ copy_acl_ip_label = &copy_acl_ip_label_normal;
76633+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
76634+ copy_role_transition = &copy_role_transition_normal;
76635+ copy_sprole_pw = &copy_sprole_pw_normal;
76636+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
76637+ copy_pointer_from_array = &copy_pointer_from_array_normal;
76638+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
76639+ }
76640+ pax_close_kernel();
76641+#endif
76642+
76643+ req_count = get_gr_arg_wrapper_size();
76644+
76645+ if (count != req_count) {
76646+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
76647+ error = -EINVAL;
76648+ goto out;
76649+ }
76650+
76651+
76652+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
76653+ gr_auth_expires = 0;
76654+ gr_auth_attempts = 0;
76655+ }
76656+
76657+ error = copy_gr_arg_wrapper(buf, &uwrap);
76658+ if (error)
76659+ goto out;
76660+
76661+ error = copy_gr_arg(uwrap.arg, gr_usermode);
76662+ if (error)
76663+ goto out;
76664+
76665+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76666+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76667+ time_after(gr_auth_expires, get_seconds())) {
76668+ error = -EBUSY;
76669+ goto out;
76670+ }
76671+
76672+ /* if non-root trying to do anything other than use a special role,
76673+ do not attempt authentication, do not count towards authentication
76674+ locking
76675+ */
76676+
76677+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
76678+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76679+ gr_is_global_nonroot(current_uid())) {
76680+ error = -EPERM;
76681+ goto out;
76682+ }
76683+
76684+ /* ensure pw and special role name are null terminated */
76685+
76686+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
76687+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
76688+
76689+	/* Okay.
76690+	 * We have enough of the argument structure (we have yet to
76691+	 * copy_from_user the tables themselves). Copy the tables
76692+	 * only if we need them, i.e. for loading operations. */
76693+
76694+ switch (gr_usermode->mode) {
76695+ case GR_STATUS:
76696+ if (gr_acl_is_enabled()) {
76697+ error = 1;
76698+ if (!gr_check_secure_terminal(current))
76699+ error = 3;
76700+ } else
76701+ error = 2;
76702+ goto out;
76703+ case GR_SHUTDOWN:
76704+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76705+ stop_machine(gr_rbac_disable, NULL, NULL);
76706+ free_variables(false);
76707+ memset(gr_usermode, 0, sizeof(struct gr_arg));
76708+ memset(gr_system_salt, 0, GR_SALT_LEN);
76709+ memset(gr_system_sum, 0, GR_SHA_LEN);
76710+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
76711+ } else if (gr_acl_is_enabled()) {
76712+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
76713+ error = -EPERM;
76714+ } else {
76715+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
76716+ error = -EAGAIN;
76717+ }
76718+ break;
76719+ case GR_ENABLE:
76720+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
76721+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
76722+ else {
76723+ if (gr_acl_is_enabled())
76724+ error = -EAGAIN;
76725+ else
76726+ error = error2;
76727+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
76728+ }
76729+ break;
76730+ case GR_OLDRELOAD:
76731+		oldmode = 1;	/* fall through to GR_RELOAD */
76732+ case GR_RELOAD:
76733+ if (!gr_acl_is_enabled()) {
76734+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
76735+ error = -EAGAIN;
76736+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76737+ error2 = gracl_reload(gr_usermode, oldmode);
76738+ if (!error2)
76739+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
76740+ else {
76741+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76742+ error = error2;
76743+ }
76744+ } else {
76745+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76746+ error = -EPERM;
76747+ }
76748+ break;
76749+ case GR_SEGVMOD:
76750+ if (unlikely(!gr_acl_is_enabled())) {
76751+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
76752+ error = -EAGAIN;
76753+ break;
76754+ }
76755+
76756+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76757+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
76758+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
76759+ struct acl_subject_label *segvacl;
76760+ segvacl =
76761+ lookup_acl_subj_label(gr_usermode->segv_inode,
76762+ gr_usermode->segv_device,
76763+ current->role);
76764+ if (segvacl) {
76765+ segvacl->crashes = 0;
76766+ segvacl->expires = 0;
76767+ }
76768+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
76769+ gr_remove_uid(gr_usermode->segv_uid);
76770+ }
76771+ } else {
76772+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
76773+ error = -EPERM;
76774+ }
76775+ break;
76776+ case GR_SPROLE:
76777+ case GR_SPROLEPAM:
76778+ if (unlikely(!gr_acl_is_enabled())) {
76779+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
76780+ error = -EAGAIN;
76781+ break;
76782+ }
76783+
76784+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
76785+ current->role->expires = 0;
76786+ current->role->auth_attempts = 0;
76787+ }
76788+
76789+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76790+ time_after(current->role->expires, get_seconds())) {
76791+ error = -EBUSY;
76792+ goto out;
76793+ }
76794+
76795+ if (lookup_special_role_auth
76796+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
76797+ && ((!sprole_salt && !sprole_sum)
76798+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
76799+ char *p = "";
76800+ assign_special_role(gr_usermode->sp_role);
76801+ read_lock(&tasklist_lock);
76802+ if (current->real_parent)
76803+ p = current->real_parent->role->rolename;
76804+ read_unlock(&tasklist_lock);
76805+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
76806+ p, acl_sp_role_value);
76807+ } else {
76808+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
76809+ error = -EPERM;
76810+			if (!(current->role->auth_attempts++))
76811+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76812+
76813+ goto out;
76814+ }
76815+ break;
76816+ case GR_UNSPROLE:
76817+ if (unlikely(!gr_acl_is_enabled())) {
76818+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
76819+ error = -EAGAIN;
76820+ break;
76821+ }
76822+
76823+ if (current->role->roletype & GR_ROLE_SPECIAL) {
76824+ char *p = "";
76825+ int i = 0;
76826+
76827+ read_lock(&tasklist_lock);
76828+ if (current->real_parent) {
76829+ p = current->real_parent->role->rolename;
76830+ i = current->real_parent->acl_role_id;
76831+ }
76832+ read_unlock(&tasklist_lock);
76833+
76834+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
76835+ gr_set_acls(1);
76836+ } else {
76837+ error = -EPERM;
76838+ goto out;
76839+ }
76840+ break;
76841+ default:
76842+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
76843+ error = -EINVAL;
76844+ break;
76845+ }
76846+
76847+ if (error != -EPERM)
76848+ goto out;
76849+
76850+	if (!(gr_auth_attempts++))
76851+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76852+
76853+ out:
76854+ mutex_unlock(&gr_dev_mutex);
76855+
76856+ if (!error)
76857+ error = req_count;
76858+
76859+ return error;
76860+}
76861+
76862+int
76863+gr_set_acls(const int type)
76864+{
76865+ struct task_struct *task, *task2;
76866+ struct acl_role_label *role = current->role;
76867+ struct acl_subject_label *subj;
76868+ __u16 acl_role_id = current->acl_role_id;
76869+ const struct cred *cred;
76870+ int ret;
76871+
76872+ rcu_read_lock();
76873+ read_lock(&tasklist_lock);
76874+ read_lock(&grsec_exec_file_lock);
76875+ do_each_thread(task2, task) {
76876+ /* check to see if we're called from the exit handler,
76877+ if so, only replace ACLs that have inherited the admin
76878+ ACL */
76879+
76880+ if (type && (task->role != role ||
76881+ task->acl_role_id != acl_role_id))
76882+ continue;
76883+
76884+ task->acl_role_id = 0;
76885+ task->acl_sp_role = 0;
76886+ task->inherited = 0;
76887+
76888+ if (task->exec_file) {
76889+ cred = __task_cred(task);
76890+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76891+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
76892+ if (subj == NULL) {
76893+ ret = -EINVAL;
76894+ read_unlock(&grsec_exec_file_lock);
76895+ read_unlock(&tasklist_lock);
76896+ rcu_read_unlock();
76897+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
76898+ return ret;
76899+ }
76900+ __gr_apply_subject_to_task(polstate, task, subj);
76901+ } else {
76902+ // it's a kernel process
76903+ task->role = polstate->kernel_role;
76904+ task->acl = polstate->kernel_role->root_label;
76905+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76906+ task->acl->mode &= ~GR_PROCFIND;
76907+#endif
76908+ }
76909+ } while_each_thread(task2, task);
76910+ read_unlock(&grsec_exec_file_lock);
76911+ read_unlock(&tasklist_lock);
76912+ rcu_read_unlock();
76913+
76914+ return 0;
76915+}
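
The switch in write_grsec_handler() above is the whole userspace protocol: gradm
performs a single write() of exactly get_gr_arg_wrapper_size() bytes carrying a
struct gr_arg_wrapper, and the handler dispatches on gr_usermode->mode. As a
minimal sketch of that calling convention, the hypothetical snippet below mimics
a GR_STATUS query; the structure layouts, the GR_STATUS and GRSECURITY_VERSION
values, and the /dev/grsec path are placeholders standing in for the real
definitions in gradm and linux/gracl.h.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define GRSECURITY_VERSION 0x3100   /* placeholder; real value in linux/gracl.h */
#define GR_STATUS 4                 /* placeholder; real value in gradm's headers */

struct gr_arg {                     /* stand-in; only mode is sketched here */
	unsigned char mode;
};

struct gr_arg_wrapper {             /* must match the kernel's layout exactly */
	struct gr_arg *arg;
	unsigned int version;       /* checked against GRSECURITY_VERSION */
	unsigned int size;          /* checked against sizeof(struct gr_arg) */
};

int main(void)
{
	struct gr_arg arg = { .mode = GR_STATUS };
	struct gr_arg_wrapper wrap = {
		.arg = &arg,
		.version = GRSECURITY_VERSION,
		.size = sizeof(struct gr_arg),
	};
	int fd = open("/dev/grsec", O_WRONLY);
	ssize_t ret;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the handler rejects any write whose length differs from the wrapper size */
	ret = write(fd, &wrap, sizeof(wrap));
	printf("GR_STATUS write returned %zd\n", ret);
	close(fd);
	return 0;
}

For GR_STATUS the return value doubles as the answer (1 enabled, 2 disabled,
3 enabled but the terminal failed gr_check_secure_terminal()); for the other
modes a successful write returns req_count so the write appears complete.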
76916diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
76917new file mode 100644
76918index 0000000..39645c9
76919--- /dev/null
76920+++ b/grsecurity/gracl_res.c
76921@@ -0,0 +1,68 @@
76922+#include <linux/kernel.h>
76923+#include <linux/sched.h>
76924+#include <linux/gracl.h>
76925+#include <linux/grinternal.h>
76926+
76927+static const char *restab_log[] = {
76928+ [RLIMIT_CPU] = "RLIMIT_CPU",
76929+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
76930+ [RLIMIT_DATA] = "RLIMIT_DATA",
76931+ [RLIMIT_STACK] = "RLIMIT_STACK",
76932+ [RLIMIT_CORE] = "RLIMIT_CORE",
76933+ [RLIMIT_RSS] = "RLIMIT_RSS",
76934+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
76935+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
76936+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
76937+ [RLIMIT_AS] = "RLIMIT_AS",
76938+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
76939+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
76940+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
76941+ [RLIMIT_NICE] = "RLIMIT_NICE",
76942+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
76943+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
76944+ [GR_CRASH_RES] = "RLIMIT_CRASH"
76945+};
76946+
76947+void
76948+gr_log_resource(const struct task_struct *task,
76949+ const int res, const unsigned long wanted, const int gt)
76950+{
76951+ const struct cred *cred;
76952+ unsigned long rlim;
76953+
76954+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
76955+ return;
76956+
76957+ // not yet supported resource
76958+ if (unlikely(!restab_log[res]))
76959+ return;
76960+
76961+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
76962+ rlim = task_rlimit_max(task, res);
76963+ else
76964+ rlim = task_rlimit(task, res);
76965+
76966+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
76967+ return;
76968+
76969+ rcu_read_lock();
76970+ cred = __task_cred(task);
76971+
76972+ if (res == RLIMIT_NPROC &&
76973+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
76974+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
76975+ goto out_rcu_unlock;
76976+ else if (res == RLIMIT_MEMLOCK &&
76977+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
76978+ goto out_rcu_unlock;
76979+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
76980+ goto out_rcu_unlock;
76981+ rcu_read_unlock();
76982+
76983+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
76984+
76985+ return;
76986+out_rcu_unlock:
76987+ rcu_read_unlock();
76988+ return;
76989+}
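
gr_log_resource() above only logs genuine excess: an infinite limit never
logs, and the gt flag picks a strict comparison so each call site decides
whether exactly reaching the limit is already reportable. A self-contained
userspace sketch of that predicate (using the RLIM_INFINITY from
sys/resource.h in place of the kernel's):

#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>

/* mirrors the inverted "likely" test in gr_log_resource() */
static bool should_log(unsigned long wanted, unsigned long rlim, int gt)
{
	if (rlim == RLIM_INFINITY)
		return false;                    /* no cap configured */
	return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
	printf("%d\n", should_log(100, 100, 1));          /* 0: equal is allowed when gt */
	printf("%d\n", should_log(100, 100, 0));          /* 1: equal is flagged when !gt */
	printf("%d\n", should_log(101, 100, 1));          /* 1: strictly over the limit */
	printf("%d\n", should_log(1, RLIM_INFINITY, 0));  /* 0: unlimited */
	return 0;
}

The separate task_rlimit_max() lookup for RLIMIT_CPU and RLIMIT_RTTIME fits
the same picture: for those two resources it is the hard ceiling whose
violation is fatal, so that is the value worth logging against.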
76990diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
76991new file mode 100644
76992index 0000000..35d9e65
76993--- /dev/null
76994+++ b/grsecurity/gracl_segv.c
76995@@ -0,0 +1,324 @@
76996+#include <linux/kernel.h>
76997+#include <linux/mm.h>
76998+#include <asm/uaccess.h>
76999+#include <asm/errno.h>
77000+#include <asm/mman.h>
77001+#include <net/sock.h>
77002+#include <linux/file.h>
77003+#include <linux/fs.h>
77004+#include <linux/net.h>
77005+#include <linux/in.h>
77006+#include <linux/slab.h>
77007+#include <linux/types.h>
77008+#include <linux/sched.h>
77009+#include <linux/timer.h>
77010+#include <linux/gracl.h>
77011+#include <linux/grsecurity.h>
77012+#include <linux/grinternal.h>
77013+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77014+#include <linux/magic.h>
77015+#include <linux/pagemap.h>
77016+#include "../fs/btrfs/async-thread.h"
77017+#include "../fs/btrfs/ctree.h"
77018+#include "../fs/btrfs/btrfs_inode.h"
77019+#endif
77020+
77021+static struct crash_uid *uid_set;
77022+static unsigned short uid_used;
77023+static DEFINE_SPINLOCK(gr_uid_lock);
77024+extern rwlock_t gr_inode_lock;
77025+extern struct acl_subject_label *
77026+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
77027+ struct acl_role_label *role);
77028+
77029+static inline dev_t __get_dev(const struct dentry *dentry)
77030+{
77031+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77032+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77033+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
77034+ else
77035+#endif
77036+ return dentry->d_sb->s_dev;
77037+}
77038+
77039+static inline u64 __get_ino(const struct dentry *dentry)
77040+{
77041+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77042+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77043+ return btrfs_ino(dentry->d_inode);
77044+ else
77045+#endif
77046+ return dentry->d_inode->i_ino;
77047+}
77048+
77049+int
77050+gr_init_uidset(void)
77051+{
77052+ uid_set =
77053+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
77054+ uid_used = 0;
77055+
77056+ return uid_set ? 1 : 0;
77057+}
77058+
77059+void
77060+gr_free_uidset(void)
77061+{
77062+ if (uid_set) {
77063+ struct crash_uid *tmpset;
77064+ spin_lock(&gr_uid_lock);
77065+ tmpset = uid_set;
77066+ uid_set = NULL;
77067+ uid_used = 0;
77068+ spin_unlock(&gr_uid_lock);
77069+ kfree(tmpset);
77071+ }
77072+
77073+ return;
77074+}
77075+
77076+int
77077+gr_find_uid(const uid_t uid)
77078+{
77079+ struct crash_uid *tmp = uid_set;
77080+ uid_t buid;
77081+ int low = 0, high = uid_used - 1, mid;
77082+
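+ /* binary search: uid_set is kept sorted by uid via gr_insertsort() */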
77083+ while (high >= low) {
77084+ mid = (low + high) >> 1;
77085+ buid = tmp[mid].uid;
77086+ if (buid == uid)
77087+ return mid;
77088+ if (buid > uid)
77089+ high = mid - 1;
77090+ if (buid < uid)
77091+ low = mid + 1;
77092+ }
77093+
77094+ return -1;
77095+}
77096+
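+/* simple insertion sort; keeps uid_set ordered so gr_find_uid() can
+ * binary-search it */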
77097+static void
77098+gr_insertsort(void)
77099+{
77100+ unsigned short i, j;
77101+ struct crash_uid index;
77102+
77103+ for (i = 1; i < uid_used; i++) {
77104+ index = uid_set[i];
77105+ j = i;
77106+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
77107+ uid_set[j] = uid_set[j - 1];
77108+ j--;
77109+ }
77110+ uid_set[j] = index;
77111+ }
77112+
77113+ return;
77114+}
77115+
77116+static void
77117+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
77118+{
77119+ int loc;
77120+ uid_t uid = GR_GLOBAL_UID(kuid);
77121+
77122+ if (uid_used == GR_UIDTABLE_MAX)
77123+ return;
77124+
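+ /* if this uid is already tracked, only refresh its expiry */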
77125+ loc = gr_find_uid(uid);
77126+
77127+ if (loc >= 0) {
77128+ uid_set[loc].expires = expires;
77129+ return;
77130+ }
77131+
77132+ uid_set[uid_used].uid = uid;
77133+ uid_set[uid_used].expires = expires;
77134+ uid_used++;
77135+
77136+ gr_insertsort();
77137+
77138+ return;
77139+}
77140+
77141+void
77142+gr_remove_uid(const unsigned short loc)
77143+{
77144+ unsigned short i;
77145+
77146+ for (i = loc + 1; i < uid_used; i++)
77147+ uid_set[i - 1] = uid_set[i];
77148+
77149+ uid_used--;
77150+
77151+ return;
77152+}
77153+
77154+int
77155+gr_check_crash_uid(const kuid_t kuid)
77156+{
77157+ int loc;
77158+ int ret = 0;
77159+ uid_t uid;
77160+
77161+ if (unlikely(!gr_acl_is_enabled()))
77162+ return 0;
77163+
77164+ uid = GR_GLOBAL_UID(kuid);
77165+
77166+ spin_lock(&gr_uid_lock);
77167+ loc = gr_find_uid(uid);
77168+
77169+ if (loc < 0)
77170+ goto out_unlock;
77171+
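+ /* expired entries are removed lazily here; a live entry means the
+ * uid is still locked out */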
77172+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
77173+ gr_remove_uid(loc);
77174+ else
77175+ ret = 1;
77176+
77177+out_unlock:
77178+ spin_unlock(&gr_uid_lock);
77179+ return ret;
77180+}
77181+
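+/* true when the task's real, effective, saved and fs ids are not all
+ * equal, i.e. it changed identity via a set*uid()/set*gid() call or a
+ * setuid/setgid binary */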
77182+static int
77183+proc_is_setxid(const struct cred *cred)
77184+{
77185+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
77186+ !uid_eq(cred->uid, cred->fsuid))
77187+ return 1;
77188+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
77189+ !gid_eq(cred->gid, cred->fsgid))
77190+ return 1;
77191+
77192+ return 0;
77193+}
77194+
77195+extern int gr_fake_force_sig(int sig, struct task_struct *t);
77196+
77197+void
77198+gr_handle_crash(struct task_struct *task, const int sig)
77199+{
77200+ struct acl_subject_label *curr;
77201+ struct task_struct *tsk, *tsk2;
77202+ const struct cred *cred;
77203+ const struct cred *cred2;
77204+
77205+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
77206+ return;
77207+
77208+ if (unlikely(!gr_acl_is_enabled()))
77209+ return;
77210+
77211+ curr = task->acl;
77212+
77213+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
77214+ return;
77215+
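+ /* restart the crash-counting window once the previous one expires */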
77216+ if (time_before_eq(curr->expires, get_seconds())) {
77217+ curr->expires = 0;
77218+ curr->crashes = 0;
77219+ }
77220+
77221+ curr->crashes++;
77222+
77223+ if (!curr->expires)
77224+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
77225+
77226+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77227+ time_after(curr->expires, get_seconds())) {
77228+ rcu_read_lock();
77229+ cred = __task_cred(task);
77230+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
77231+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
77232+ spin_lock(&gr_uid_lock);
77233+ gr_insert_uid(cred->uid, curr->expires);
77234+ spin_unlock(&gr_uid_lock);
77235+ curr->expires = 0;
77236+ curr->crashes = 0;
77237+ read_lock(&tasklist_lock);
77238+ do_each_thread(tsk2, tsk) {
77239+ cred2 = __task_cred(tsk);
77240+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
77241+ gr_fake_force_sig(SIGKILL, tsk);
77242+ } while_each_thread(tsk2, tsk);
77243+ read_unlock(&tasklist_lock);
77244+ } else {
77245+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
77246+ read_lock(&tasklist_lock);
77247+ read_lock(&grsec_exec_file_lock);
77248+ do_each_thread(tsk2, tsk) {
77249+ if (likely(tsk != task)) {
77250+ // if this thread has the same subject as the one that triggered
77251+ // RES_CRASH and it's the same binary, kill it
77252+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
77253+ gr_fake_force_sig(SIGKILL, tsk);
77254+ }
77255+ } while_each_thread(tsk2, tsk);
77256+ read_unlock(&grsec_exec_file_lock);
77257+ read_unlock(&tasklist_lock);
77258+ }
77259+ rcu_read_unlock();
77260+ }
77261+
77262+ return;
77263+}
77264+
77265+int
77266+gr_check_crash_exec(const struct file *filp)
77267+{
77268+ struct acl_subject_label *curr;
77269+ struct dentry *dentry;
77270+
77271+ if (unlikely(!gr_acl_is_enabled()))
77272+ return 0;
77273+
77274+ read_lock(&gr_inode_lock);
77275+ dentry = filp->f_path.dentry;
77276+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
77277+ current->role);
77278+ read_unlock(&gr_inode_lock);
77279+
77280+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
77281+ (!curr->crashes && !curr->expires))
77282+ return 0;
77283+
77284+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
77285+ time_after(curr->expires, get_seconds()))
77286+ return 1;
77287+ else if (time_before_eq(curr->expires, get_seconds())) {
77288+ curr->crashes = 0;
77289+ curr->expires = 0;
77290+ }
77291+
77292+ return 0;
77293+}
77294+
77295+void
77296+gr_handle_alertkill(struct task_struct *task)
77297+{
77298+ struct acl_subject_label *curracl;
77299+ __u32 curr_ip;
77300+ struct task_struct *p, *p2;
77301+
77302+ if (unlikely(!gr_acl_is_enabled()))
77303+ return;
77304+
77305+ curracl = task->acl;
77306+ curr_ip = task->signal->curr_ip;
77307+
77308+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
77309+ read_lock(&tasklist_lock);
77310+ do_each_thread(p2, p) {
77311+ if (p->signal->curr_ip == curr_ip)
77312+ gr_fake_force_sig(SIGKILL, p);
77313+ } while_each_thread(p2, p);
77314+ read_unlock(&tasklist_lock);
77315+ } else if (curracl->mode & GR_KILLPROC)
77316+ gr_fake_force_sig(SIGKILL, task);
77317+
77318+ return;
77319+}
77320diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
77321new file mode 100644
77322index 0000000..6b0c9cc
77323--- /dev/null
77324+++ b/grsecurity/gracl_shm.c
77325@@ -0,0 +1,40 @@
77326+#include <linux/kernel.h>
77327+#include <linux/mm.h>
77328+#include <linux/sched.h>
77329+#include <linux/file.h>
77330+#include <linux/ipc.h>
77331+#include <linux/gracl.h>
77332+#include <linux/grsecurity.h>
77333+#include <linux/grinternal.h>
77334+
77335+int
77336+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77337+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
77338+{
77339+ struct task_struct *task;
77340+
77341+ if (!gr_acl_is_enabled())
77342+ return 1;
77343+
77344+ rcu_read_lock();
77345+ read_lock(&tasklist_lock);
77346+
77347+ task = find_task_by_vpid(shm_cprid);
77348+
77349+ if (unlikely(!task))
77350+ task = find_task_by_vpid(shm_lapid);
77351+
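+ /* deny the attach if the creating (or last-attaching) task is still
+ * alive and predates the segment (ruling out pid reuse), runs under a
+ * different subject, and that subject protects its shm segments */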
77352+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
77353+ (task_pid_nr(task) == shm_lapid)) &&
77354+ (task->acl->mode & GR_PROTSHM) &&
77355+ (task->acl != current->acl))) {
77356+ read_unlock(&tasklist_lock);
77357+ rcu_read_unlock();
77358+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
77359+ return 0;
77360+ }
77361+ read_unlock(&tasklist_lock);
77362+ rcu_read_unlock();
77363+
77364+ return 1;
77365+}
77366diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
77367new file mode 100644
77368index 0000000..bc0be01
77369--- /dev/null
77370+++ b/grsecurity/grsec_chdir.c
77371@@ -0,0 +1,19 @@
77372+#include <linux/kernel.h>
77373+#include <linux/sched.h>
77374+#include <linux/fs.h>
77375+#include <linux/file.h>
77376+#include <linux/grsecurity.h>
77377+#include <linux/grinternal.h>
77378+
77379+void
77380+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
77381+{
77382+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77383+ if ((grsec_enable_chdir && grsec_enable_group &&
77384+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
77385+ !grsec_enable_group)) {
77386+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
77387+ }
77388+#endif
77389+ return;
77390+}
77391diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
77392new file mode 100644
77393index 0000000..114ea4f
77394--- /dev/null
77395+++ b/grsecurity/grsec_chroot.c
77396@@ -0,0 +1,467 @@
77397+#include <linux/kernel.h>
77398+#include <linux/module.h>
77399+#include <linux/sched.h>
77400+#include <linux/file.h>
77401+#include <linux/fs.h>
77402+#include <linux/mount.h>
77403+#include <linux/types.h>
77404+#include "../fs/mount.h"
77405+#include <linux/grsecurity.h>
77406+#include <linux/grinternal.h>
77407+
77408+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77409+int gr_init_ran;
77410+#endif
77411+
77412+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
77413+{
77414+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77415+ struct dentry *tmpd = dentry;
77416+
77417+ read_seqlock_excl(&mount_lock);
77418+ write_seqlock(&rename_lock);
77419+
77420+ while (tmpd != mnt->mnt_root) {
77421+ atomic_inc(&tmpd->chroot_refcnt);
77422+ tmpd = tmpd->d_parent;
77423+ }
77424+ atomic_inc(&tmpd->chroot_refcnt);
77425+
77426+ write_sequnlock(&rename_lock);
77427+ read_sequnlock_excl(&mount_lock);
77428+#endif
77429+}
77430+
77431+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
77432+{
77433+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77434+ struct dentry *tmpd = dentry;
77435+
77436+ read_seqlock_excl(&mount_lock);
77437+ write_seqlock(&rename_lock);
77438+
77439+ while (tmpd != mnt->mnt_root) {
77440+ atomic_dec(&tmpd->chroot_refcnt);
77441+ tmpd = tmpd->d_parent;
77442+ }
77443+ atomic_dec(&tmpd->chroot_refcnt);
77444+
77445+ write_sequnlock(&rename_lock);
77446+ read_sequnlock_excl(&mount_lock);
77447+#endif
77448+}
77449+
77450+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
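+/* walk up toward the filesystem root for the nearest ancestor currently
+ * in use as some task's chroot (nonzero chroot_refcnt) */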
77451+static struct dentry *get_closest_chroot(struct dentry *dentry)
77452+{
77453+ write_seqlock(&rename_lock);
77454+ do {
77455+ if (atomic_read(&dentry->chroot_refcnt)) {
77456+ write_sequnlock(&rename_lock);
77457+ return dentry;
77458+ }
77459+ dentry = dentry->d_parent;
77460+ } while (!IS_ROOT(dentry));
77461+ write_sequnlock(&rename_lock);
77462+ return NULL;
77463+}
77464+#endif
77465+
77466+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
77467+ struct dentry *newdentry, struct vfsmount *newmnt)
77468+{
77469+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
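+ /* deny renames that would move a path out of an active chroot */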
77470+ struct dentry *chroot;
77471+
77472+ if (unlikely(!grsec_enable_chroot_rename))
77473+ return 0;
77474+
77475+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
77476+ return 0;
77477+
77478+ chroot = get_closest_chroot(olddentry);
77479+
77480+ if (chroot == NULL)
77481+ return 0;
77482+
77483+ if (is_subdir(newdentry, chroot))
77484+ return 0;
77485+
77486+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
77487+
77488+ return 1;
77489+#else
77490+ return 0;
77491+#endif
77492+}
77493+
77494+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
77495+{
77496+#ifdef CONFIG_GRKERNSEC
77497+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
77498+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
77499+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77500+ && gr_init_ran
77501+#endif
77502+ )
77503+ task->gr_is_chrooted = 1;
77504+ else {
77505+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
77506+ if (task_pid_nr(task) == 1 && !gr_init_ran)
77507+ gr_init_ran = 1;
77508+#endif
77509+ task->gr_is_chrooted = 0;
77510+ }
77511+
77512+ task->gr_chroot_dentry = path->dentry;
77513+#endif
77514+ return;
77515+}
77516+
77517+void gr_clear_chroot_entries(struct task_struct *task)
77518+{
77519+#ifdef CONFIG_GRKERNSEC
77520+ task->gr_is_chrooted = 0;
77521+ task->gr_chroot_dentry = NULL;
77522+#endif
77523+ return;
77524+}
77525+
77526+int
77527+gr_handle_chroot_unix(const pid_t pid)
77528+{
77529+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77530+ struct task_struct *p;
77531+
77532+ if (unlikely(!grsec_enable_chroot_unix))
77533+ return 1;
77534+
77535+ if (likely(!proc_is_chrooted(current)))
77536+ return 1;
77537+
77538+ rcu_read_lock();
77539+ read_lock(&tasklist_lock);
77540+ p = find_task_by_vpid_unrestricted(pid);
77541+ if (unlikely(p && !have_same_root(current, p))) {
77542+ read_unlock(&tasklist_lock);
77543+ rcu_read_unlock();
77544+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
77545+ return 0;
77546+ }
77547+ read_unlock(&tasklist_lock);
77548+ rcu_read_unlock();
77549+#endif
77550+ return 1;
77551+}
77552+
77553+int
77554+gr_handle_chroot_nice(void)
77555+{
77556+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77557+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
77558+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
77559+ return -EPERM;
77560+ }
77561+#endif
77562+ return 0;
77563+}
77564+
77565+int
77566+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
77567+{
77568+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77569+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
77570+ && proc_is_chrooted(current)) {
77571+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
77572+ return -EACCES;
77573+ }
77574+#endif
77575+ return 0;
77576+}
77577+
77578+int
77579+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
77580+{
77581+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77582+ struct task_struct *p;
77583+ int ret = 0;
77584+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
77585+ return ret;
77586+
77587+ read_lock(&tasklist_lock);
77588+ do_each_pid_task(pid, type, p) {
77589+ if (!have_same_root(current, p)) {
77590+ ret = 1;
77591+ goto out;
77592+ }
77593+ } while_each_pid_task(pid, type, p);
77594+out:
77595+ read_unlock(&tasklist_lock);
77596+ return ret;
77597+#endif
77598+ return 0;
77599+}
77600+
77601+int
77602+gr_pid_is_chrooted(struct task_struct *p)
77603+{
77604+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77605+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
77606+ return 0;
77607+
77608+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
77609+ !have_same_root(current, p)) {
77610+ return 1;
77611+ }
77612+#endif
77613+ return 0;
77614+}
77615+
77616+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
77617+
77618+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
77619+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
77620+{
77621+ struct path path, currentroot;
77622+ int ret = 0;
77623+
77624+ path.dentry = (struct dentry *)u_dentry;
77625+ path.mnt = (struct vfsmount *)u_mnt;
77626+ get_fs_root(current->fs, &currentroot);
77627+ if (path_is_under(&path, &currentroot))
77628+ ret = 1;
77629+ path_put(&currentroot);
77630+
77631+ return ret;
77632+}
77633+#endif
77634+
77635+int
77636+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
77637+{
77638+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77639+ if (!grsec_enable_chroot_fchdir)
77640+ return 1;
77641+
77642+ if (!proc_is_chrooted(current))
77643+ return 1;
77644+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
77645+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
77646+ return 0;
77647+ }
77648+#endif
77649+ return 1;
77650+}
77651+
77652+int
77653+gr_chroot_fhandle(void)
77654+{
77655+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77656+ if (!grsec_enable_chroot_fchdir)
77657+ return 1;
77658+
77659+ if (!proc_is_chrooted(current))
77660+ return 1;
77661+ else {
77662+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
77663+ return 0;
77664+ }
77665+#endif
77666+ return 1;
77667+}
77668+
77669+int
77670+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77671+ const u64 shm_createtime)
77672+{
77673+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77674+ struct task_struct *p;
77675+
77676+ if (unlikely(!grsec_enable_chroot_shmat))
77677+ return 1;
77678+
77679+ if (likely(!proc_is_chrooted(current)))
77680+ return 1;
77681+
77682+ rcu_read_lock();
77683+ read_lock(&tasklist_lock);
77684+
77685+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
77686+ if (time_before_eq64(p->start_time, shm_createtime)) {
77687+ if (have_same_root(current, p)) {
77688+ goto allow;
77689+ } else {
77690+ read_unlock(&tasklist_lock);
77691+ rcu_read_unlock();
77692+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77693+ return 0;
77694+ }
77695+ }
77696+ /* creator exited, pid reuse, fall through to next check */
77697+ }
77698+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
77699+ if (unlikely(!have_same_root(current, p))) {
77700+ read_unlock(&tasklist_lock);
77701+ rcu_read_unlock();
77702+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
77703+ return 0;
77704+ }
77705+ }
77706+
77707+allow:
77708+ read_unlock(&tasklist_lock);
77709+ rcu_read_unlock();
77710+#endif
77711+ return 1;
77712+}
77713+
77714+void
77715+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
77716+{
77717+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77718+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
77719+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
77720+#endif
77721+ return;
77722+}
77723+
77724+int
77725+gr_handle_chroot_mknod(const struct dentry *dentry,
77726+ const struct vfsmount *mnt, const int mode)
77727+{
77728+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77729+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
77730+ proc_is_chrooted(current)) {
77731+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
77732+ return -EPERM;
77733+ }
77734+#endif
77735+ return 0;
77736+}
77737+
77738+int
77739+gr_handle_chroot_mount(const struct dentry *dentry,
77740+ const struct vfsmount *mnt, const char *dev_name)
77741+{
77742+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77743+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
77744+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
77745+ return -EPERM;
77746+ }
77747+#endif
77748+ return 0;
77749+}
77750+
77751+int
77752+gr_handle_chroot_pivot(void)
77753+{
77754+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77755+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
77756+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
77757+ return -EPERM;
77758+ }
77759+#endif
77760+ return 0;
77761+}
77762+
77763+int
77764+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
77765+{
77766+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77767+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
77768+ !gr_is_outside_chroot(dentry, mnt)) {
77769+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
77770+ return -EPERM;
77771+ }
77772+#endif
77773+ return 0;
77774+}
77775+
77776+extern const char *captab_log[];
77777+extern int captab_log_entries;
77778+
77779+int
77780+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77781+{
77782+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77783+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77784+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77785+ if (cap_raised(chroot_caps, cap)) {
77786+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
77787+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
77788+ }
77789+ return 0;
77790+ }
77791+ }
77792+#endif
77793+ return 1;
77794+}
77795+
77796+int
77797+gr_chroot_is_capable(const int cap)
77798+{
77799+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77800+ return gr_task_chroot_is_capable(current, current_cred(), cap);
77801+#endif
77802+ return 1;
77803+}
77804+
77805+int
77806+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
77807+{
77808+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77809+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77810+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77811+ if (cap_raised(chroot_caps, cap)) {
77812+ return 0;
77813+ }
77814+ }
77815+#endif
77816+ return 1;
77817+}
77818+
77819+int
77820+gr_chroot_is_capable_nolog(const int cap)
77821+{
77822+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77823+ return gr_task_chroot_is_capable_nolog(current, cap);
77824+#endif
77825+ return 1;
77826+}
77827+
77828+int
77829+gr_handle_chroot_sysctl(const int op)
77830+{
77831+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77832+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
77833+ proc_is_chrooted(current))
77834+ return -EACCES;
77835+#endif
77836+ return 0;
77837+}
77838+
77839+void
77840+gr_handle_chroot_chdir(const struct path *path)
77841+{
77842+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77843+ if (grsec_enable_chroot_chdir)
77844+ set_fs_pwd(current->fs, path);
77845+#endif
77846+ return;
77847+}
77848+
77849+int
77850+gr_handle_chroot_chmod(const struct dentry *dentry,
77851+ const struct vfsmount *mnt, const int mode)
77852+{
77853+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77854+ /* allow chmod +s on directories, but not files */
77855+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
77856+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
77857+ proc_is_chrooted(current)) {
77858+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
77859+ return -EPERM;
77860+ }
77861+#endif
77862+ return 0;
77863+}
77864diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
77865new file mode 100644
77866index 0000000..946f750
77867--- /dev/null
77868+++ b/grsecurity/grsec_disabled.c
77869@@ -0,0 +1,445 @@
77870+#include <linux/kernel.h>
77871+#include <linux/module.h>
77872+#include <linux/sched.h>
77873+#include <linux/file.h>
77874+#include <linux/fs.h>
77875+#include <linux/kdev_t.h>
77876+#include <linux/net.h>
77877+#include <linux/in.h>
77878+#include <linux/ip.h>
77879+#include <linux/skbuff.h>
77880+#include <linux/sysctl.h>
77881+
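+/* stubs used when grsecurity is compiled out: every hook below either
+ * permits the operation outright or does nothing */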
77882+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
77883+void
77884+pax_set_initial_flags(struct linux_binprm *bprm)
77885+{
77886+ return;
77887+}
77888+#endif
77889+
77890+#ifdef CONFIG_SYSCTL
77891+__u32
77892+gr_handle_sysctl(const struct ctl_table * table, const int op)
77893+{
77894+ return 0;
77895+}
77896+#endif
77897+
77898+#ifdef CONFIG_TASKSTATS
77899+int gr_is_taskstats_denied(int pid)
77900+{
77901+ return 0;
77902+}
77903+#endif
77904+
77905+int
77906+gr_acl_is_enabled(void)
77907+{
77908+ return 0;
77909+}
77910+
77911+int
77912+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
77913+{
77914+ return 0;
77915+}
77916+
77917+void
77918+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77919+{
77920+ return;
77921+}
77922+
77923+int
77924+gr_handle_rawio(const struct inode *inode)
77925+{
77926+ return 0;
77927+}
77928+
77929+void
77930+gr_acl_handle_psacct(struct task_struct *task, const long code)
77931+{
77932+ return;
77933+}
77934+
77935+int
77936+gr_handle_ptrace(struct task_struct *task, const long request)
77937+{
77938+ return 0;
77939+}
77940+
77941+int
77942+gr_handle_proc_ptrace(struct task_struct *task)
77943+{
77944+ return 0;
77945+}
77946+
77947+int
77948+gr_set_acls(const int type)
77949+{
77950+ return 0;
77951+}
77952+
77953+int
77954+gr_check_hidden_task(const struct task_struct *tsk)
77955+{
77956+ return 0;
77957+}
77958+
77959+int
77960+gr_check_protected_task(const struct task_struct *task)
77961+{
77962+ return 0;
77963+}
77964+
77965+int
77966+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77967+{
77968+ return 0;
77969+}
77970+
77971+void
77972+gr_copy_label(struct task_struct *tsk)
77973+{
77974+ return;
77975+}
77976+
77977+void
77978+gr_set_pax_flags(struct task_struct *task)
77979+{
77980+ return;
77981+}
77982+
77983+int
77984+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77985+ const int unsafe_share)
77986+{
77987+ return 0;
77988+}
77989+
77990+void
77991+gr_handle_delete(const u64 ino, const dev_t dev)
77992+{
77993+ return;
77994+}
77995+
77996+void
77997+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77998+{
77999+ return;
78000+}
78001+
78002+void
78003+gr_handle_crash(struct task_struct *task, const int sig)
78004+{
78005+ return;
78006+}
78007+
78008+int
78009+gr_check_crash_exec(const struct file *filp)
78010+{
78011+ return 0;
78012+}
78013+
78014+int
78015+gr_check_crash_uid(const kuid_t uid)
78016+{
78017+ return 0;
78018+}
78019+
78020+void
78021+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
78022+ struct dentry *old_dentry,
78023+ struct dentry *new_dentry,
78024+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
78025+{
78026+ return;
78027+}
78028+
78029+int
78030+gr_search_socket(const int family, const int type, const int protocol)
78031+{
78032+ return 1;
78033+}
78034+
78035+int
78036+gr_search_connectbind(const int mode, const struct socket *sock,
78037+ const struct sockaddr_in *addr)
78038+{
78039+ return 0;
78040+}
78041+
78042+void
78043+gr_handle_alertkill(struct task_struct *task)
78044+{
78045+ return;
78046+}
78047+
78048+__u32
78049+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
78050+{
78051+ return 1;
78052+}
78053+
78054+__u32
78055+gr_acl_handle_hidden_file(const struct dentry * dentry,
78056+ const struct vfsmount * mnt)
78057+{
78058+ return 1;
78059+}
78060+
78061+__u32
78062+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
78063+ int acc_mode)
78064+{
78065+ return 1;
78066+}
78067+
78068+__u32
78069+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
78070+{
78071+ return 1;
78072+}
78073+
78074+__u32
78075+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
78076+{
78077+ return 1;
78078+}
78079+
78080+int
78081+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
78082+ unsigned int *vm_flags)
78083+{
78084+ return 1;
78085+}
78086+
78087+__u32
78088+gr_acl_handle_truncate(const struct dentry * dentry,
78089+ const struct vfsmount * mnt)
78090+{
78091+ return 1;
78092+}
78093+
78094+__u32
78095+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
78096+{
78097+ return 1;
78098+}
78099+
78100+__u32
78101+gr_acl_handle_access(const struct dentry * dentry,
78102+ const struct vfsmount * mnt, const int fmode)
78103+{
78104+ return 1;
78105+}
78106+
78107+__u32
78108+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
78109+ umode_t *mode)
78110+{
78111+ return 1;
78112+}
78113+
78114+__u32
78115+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
78116+{
78117+ return 1;
78118+}
78119+
78120+__u32
78121+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
78122+{
78123+ return 1;
78124+}
78125+
78126+__u32
78127+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
78128+{
78129+ return 1;
78130+}
78131+
78132+void
78133+grsecurity_init(void)
78134+{
78135+ return;
78136+}
78137+
78138+umode_t gr_acl_umask(void)
78139+{
78140+ return 0;
78141+}
78142+
78143+__u32
78144+gr_acl_handle_mknod(const struct dentry * new_dentry,
78145+ const struct dentry * parent_dentry,
78146+ const struct vfsmount * parent_mnt,
78147+ const int mode)
78148+{
78149+ return 1;
78150+}
78151+
78152+__u32
78153+gr_acl_handle_mkdir(const struct dentry * new_dentry,
78154+ const struct dentry * parent_dentry,
78155+ const struct vfsmount * parent_mnt)
78156+{
78157+ return 1;
78158+}
78159+
78160+__u32
78161+gr_acl_handle_symlink(const struct dentry * new_dentry,
78162+ const struct dentry * parent_dentry,
78163+ const struct vfsmount * parent_mnt, const struct filename *from)
78164+{
78165+ return 1;
78166+}
78167+
78168+__u32
78169+gr_acl_handle_link(const struct dentry * new_dentry,
78170+ const struct dentry * parent_dentry,
78171+ const struct vfsmount * parent_mnt,
78172+ const struct dentry * old_dentry,
78173+ const struct vfsmount * old_mnt, const struct filename *to)
78174+{
78175+ return 1;
78176+}
78177+
78178+int
78179+gr_acl_handle_rename(const struct dentry *new_dentry,
78180+ const struct dentry *parent_dentry,
78181+ const struct vfsmount *parent_mnt,
78182+ const struct dentry *old_dentry,
78183+ const struct inode *old_parent_inode,
78184+ const struct vfsmount *old_mnt, const struct filename *newname,
78185+ unsigned int flags)
78186+{
78187+ return 0;
78188+}
78189+
78190+int
78191+gr_acl_handle_filldir(const struct file *file, const char *name,
78192+ const int namelen, const u64 ino)
78193+{
78194+ return 1;
78195+}
78196+
78197+int
78198+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
78199+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
78200+{
78201+ return 1;
78202+}
78203+
78204+int
78205+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
78206+{
78207+ return 0;
78208+}
78209+
78210+int
78211+gr_search_accept(const struct socket *sock)
78212+{
78213+ return 0;
78214+}
78215+
78216+int
78217+gr_search_listen(const struct socket *sock)
78218+{
78219+ return 0;
78220+}
78221+
78222+int
78223+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
78224+{
78225+ return 0;
78226+}
78227+
78228+__u32
78229+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
78230+{
78231+ return 1;
78232+}
78233+
78234+__u32
78235+gr_acl_handle_creat(const struct dentry * dentry,
78236+ const struct dentry * p_dentry,
78237+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
78238+ const int imode)
78239+{
78240+ return 1;
78241+}
78242+
78243+void
78244+gr_acl_handle_exit(void)
78245+{
78246+ return;
78247+}
78248+
78249+int
78250+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78251+{
78252+ return 1;
78253+}
78254+
78255+void
78256+gr_set_role_label(const kuid_t uid, const kgid_t gid)
78257+{
78258+ return;
78259+}
78260+
78261+int
78262+gr_acl_handle_procpidmem(const struct task_struct *task)
78263+{
78264+ return 0;
78265+}
78266+
78267+int
78268+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
78269+{
78270+ return 0;
78271+}
78272+
78273+int
78274+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
78275+{
78276+ return 0;
78277+}
78278+
78279+int
78280+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
78281+{
78282+ return 0;
78283+}
78284+
78285+int
78286+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
78287+{
78288+ return 0;
78289+}
78290+
78291+int gr_acl_enable_at_secure(void)
78292+{
78293+ return 0;
78294+}
78295+
78296+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
78297+{
78298+ return dentry->d_sb->s_dev;
78299+}
78300+
78301+u64 gr_get_ino_from_dentry(struct dentry *dentry)
78302+{
78303+ return dentry->d_inode->i_ino;
78304+}
78305+
78306+void gr_put_exec_file(struct task_struct *task)
78307+{
78308+ return;
78309+}
78310+
78311+#ifdef CONFIG_SECURITY
78312+EXPORT_SYMBOL_GPL(gr_check_user_change);
78313+EXPORT_SYMBOL_GPL(gr_check_group_change);
78314+#endif
78315diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
78316new file mode 100644
78317index 0000000..fb7531e
78318--- /dev/null
78319+++ b/grsecurity/grsec_exec.c
78320@@ -0,0 +1,189 @@
78321+#include <linux/kernel.h>
78322+#include <linux/sched.h>
78323+#include <linux/file.h>
78324+#include <linux/binfmts.h>
78325+#include <linux/fs.h>
78326+#include <linux/types.h>
78327+#include <linux/grdefs.h>
78328+#include <linux/grsecurity.h>
78329+#include <linux/grinternal.h>
78330+#include <linux/capability.h>
78331+#include <linux/module.h>
78332+#include <linux/compat.h>
78333+
78334+#include <asm/uaccess.h>
78335+
78336+#ifdef CONFIG_GRKERNSEC_EXECLOG
78337+static char gr_exec_arg_buf[132];
78338+static DEFINE_MUTEX(gr_exec_arg_mutex);
78339+#endif
78340+
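+/* must mirror struct user_arg_ptr in fs/exec.c, whose get_user_arg_ptr()
+ * is reused here */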
78341+struct user_arg_ptr {
78342+#ifdef CONFIG_COMPAT
78343+ bool is_compat;
78344+#endif
78345+ union {
78346+ const char __user *const __user *native;
78347+#ifdef CONFIG_COMPAT
78348+ const compat_uptr_t __user *compat;
78349+#endif
78350+ } ptr;
78351+};
78352+
78353+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
78354+
78355+void
78356+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
78357+{
78358+#ifdef CONFIG_GRKERNSEC_EXECLOG
78359+ char *grarg = gr_exec_arg_buf;
78360+ unsigned int i, x, execlen = 0;
78361+ char c;
78362+
78363+ if (!((grsec_enable_execlog && grsec_enable_group &&
78364+ in_group_p(grsec_audit_gid))
78365+ || (grsec_enable_execlog && !grsec_enable_group)))
78366+ return;
78367+
78368+ mutex_lock(&gr_exec_arg_mutex);
78369+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
78370+
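+ /* concatenate up to 128 bytes of argv into the log buffer, separated
+ * by spaces */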
78371+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
78372+ const char __user *p;
78373+ unsigned int len;
78374+
78375+ p = get_user_arg_ptr(argv, i);
78376+ if (IS_ERR(p))
78377+ goto log;
78378+
78379+ len = strnlen_user(p, 128 - execlen);
78380+ if (len > 128 - execlen)
78381+ len = 128 - execlen;
78382+ else if (len > 0)
78383+ len--;
78384+ if (copy_from_user(grarg + execlen, p, len))
78385+ goto log;
78386+
78387+ /* replace unprintable characters with spaces */
78388+ for (x = 0; x < len; x++) {
78389+ c = *(grarg + execlen + x);
78390+ if (c < 32 || c > 126)
78391+ *(grarg + execlen + x) = ' ';
78392+ }
78393+
78394+ execlen += len;
78395+ *(grarg + execlen) = ' ';
78396+ *(grarg + execlen + 1) = '\0';
78397+ execlen++;
78398+ }
78399+
78400+ log:
78401+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
78402+ bprm->file->f_path.mnt, grarg);
78403+ mutex_unlock(&gr_exec_arg_mutex);
78404+#endif
78405+ return;
78406+}
78407+
78408+#ifdef CONFIG_GRKERNSEC
78409+extern int gr_acl_is_capable(const int cap);
78410+extern int gr_acl_is_capable_nolog(const int cap);
78411+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78412+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
78413+extern int gr_chroot_is_capable(const int cap);
78414+extern int gr_chroot_is_capable_nolog(const int cap);
78415+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78416+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
78417+#endif
78418+
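+/* indexed by capability number; order must match include/uapi/linux/capability.h */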
78419+const char *captab_log[] = {
78420+ "CAP_CHOWN",
78421+ "CAP_DAC_OVERRIDE",
78422+ "CAP_DAC_READ_SEARCH",
78423+ "CAP_FOWNER",
78424+ "CAP_FSETID",
78425+ "CAP_KILL",
78426+ "CAP_SETGID",
78427+ "CAP_SETUID",
78428+ "CAP_SETPCAP",
78429+ "CAP_LINUX_IMMUTABLE",
78430+ "CAP_NET_BIND_SERVICE",
78431+ "CAP_NET_BROADCAST",
78432+ "CAP_NET_ADMIN",
78433+ "CAP_NET_RAW",
78434+ "CAP_IPC_LOCK",
78435+ "CAP_IPC_OWNER",
78436+ "CAP_SYS_MODULE",
78437+ "CAP_SYS_RAWIO",
78438+ "CAP_SYS_CHROOT",
78439+ "CAP_SYS_PTRACE",
78440+ "CAP_SYS_PACCT",
78441+ "CAP_SYS_ADMIN",
78442+ "CAP_SYS_BOOT",
78443+ "CAP_SYS_NICE",
78444+ "CAP_SYS_RESOURCE",
78445+ "CAP_SYS_TIME",
78446+ "CAP_SYS_TTY_CONFIG",
78447+ "CAP_MKNOD",
78448+ "CAP_LEASE",
78449+ "CAP_AUDIT_WRITE",
78450+ "CAP_AUDIT_CONTROL",
78451+ "CAP_SETFCAP",
78452+ "CAP_MAC_OVERRIDE",
78453+ "CAP_MAC_ADMIN",
78454+ "CAP_SYSLOG",
78455+ "CAP_WAKE_ALARM",
78456+ "CAP_BLOCK_SUSPEND",
78457+ "CAP_AUDIT_READ"
78458+};
78459+
78460+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
78461+
78462+int gr_is_capable(const int cap)
78463+{
78464+#ifdef CONFIG_GRKERNSEC
78465+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
78466+ return 1;
78467+ return 0;
78468+#else
78469+ return 1;
78470+#endif
78471+}
78472+
78473+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
78474+{
78475+#ifdef CONFIG_GRKERNSEC
78476+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
78477+ return 1;
78478+ return 0;
78479+#else
78480+ return 1;
78481+#endif
78482+}
78483+
78484+int gr_is_capable_nolog(const int cap)
78485+{
78486+#ifdef CONFIG_GRKERNSEC
78487+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
78488+ return 1;
78489+ return 0;
78490+#else
78491+ return 1;
78492+#endif
78493+}
78494+
78495+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
78496+{
78497+#ifdef CONFIG_GRKERNSEC
78498+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
78499+ return 1;
78500+ return 0;
78501+#else
78502+ return 1;
78503+#endif
78504+}
78505+
78506+EXPORT_SYMBOL_GPL(gr_is_capable);
78507+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
78508+EXPORT_SYMBOL_GPL(gr_task_is_capable);
78509+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
78510diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
78511new file mode 100644
78512index 0000000..06cc6ea
78513--- /dev/null
78514+++ b/grsecurity/grsec_fifo.c
78515@@ -0,0 +1,24 @@
78516+#include <linux/kernel.h>
78517+#include <linux/sched.h>
78518+#include <linux/fs.h>
78519+#include <linux/file.h>
78520+#include <linux/grinternal.h>
78521+
78522+int
78523+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
78524+ const struct dentry *dir, const int flag, const int acc_mode)
78525+{
78526+#ifdef CONFIG_GRKERNSEC_FIFO
78527+ const struct cred *cred = current_cred();
78528+
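+ /* deny (non-O_EXCL) opens of FIFOs in sticky directories when the FIFO
+ * is owned by neither the directory owner nor the opener; log only if
+ * plain DAC would have allowed the open */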
78529+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
78530+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
78531+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
78532+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
78533+ if (!inode_permission(dentry->d_inode, acc_mode))
78534+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
78535+ return -EACCES;
78536+ }
78537+#endif
78538+ return 0;
78539+}
78540diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
78541new file mode 100644
78542index 0000000..8ca18bf
78543--- /dev/null
78544+++ b/grsecurity/grsec_fork.c
78545@@ -0,0 +1,23 @@
78546+#include <linux/kernel.h>
78547+#include <linux/sched.h>
78548+#include <linux/grsecurity.h>
78549+#include <linux/grinternal.h>
78550+#include <linux/errno.h>
78551+
78552+void
78553+gr_log_forkfail(const int retval)
78554+{
78555+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78556+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
78557+ switch (retval) {
78558+ case -EAGAIN:
78559+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
78560+ break;
78561+ case -ENOMEM:
78562+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
78563+ break;
78564+ }
78565+ }
78566+#endif
78567+ return;
78568+}
78569diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
78570new file mode 100644
78571index 0000000..4ed9e7d
78572--- /dev/null
78573+++ b/grsecurity/grsec_init.c
78574@@ -0,0 +1,290 @@
78575+#include <linux/kernel.h>
78576+#include <linux/sched.h>
78577+#include <linux/mm.h>
78578+#include <linux/gracl.h>
78579+#include <linux/slab.h>
78580+#include <linux/vmalloc.h>
78581+#include <linux/percpu.h>
78582+#include <linux/module.h>
78583+
78584+int grsec_enable_ptrace_readexec;
78585+int grsec_enable_setxid;
78586+int grsec_enable_symlinkown;
78587+kgid_t grsec_symlinkown_gid;
78588+int grsec_enable_brute;
78589+int grsec_enable_link;
78590+int grsec_enable_dmesg;
78591+int grsec_enable_harden_ptrace;
78592+int grsec_enable_harden_ipc;
78593+int grsec_enable_fifo;
78594+int grsec_enable_execlog;
78595+int grsec_enable_signal;
78596+int grsec_enable_forkfail;
78597+int grsec_enable_audit_ptrace;
78598+int grsec_enable_time;
78599+int grsec_enable_group;
78600+kgid_t grsec_audit_gid;
78601+int grsec_enable_chdir;
78602+int grsec_enable_mount;
78603+int grsec_enable_rofs;
78604+int grsec_deny_new_usb;
78605+int grsec_enable_chroot_findtask;
78606+int grsec_enable_chroot_mount;
78607+int grsec_enable_chroot_shmat;
78608+int grsec_enable_chroot_fchdir;
78609+int grsec_enable_chroot_double;
78610+int grsec_enable_chroot_pivot;
78611+int grsec_enable_chroot_chdir;
78612+int grsec_enable_chroot_chmod;
78613+int grsec_enable_chroot_mknod;
78614+int grsec_enable_chroot_nice;
78615+int grsec_enable_chroot_execlog;
78616+int grsec_enable_chroot_caps;
78617+int grsec_enable_chroot_rename;
78618+int grsec_enable_chroot_sysctl;
78619+int grsec_enable_chroot_unix;
78620+int grsec_enable_tpe;
78621+kgid_t grsec_tpe_gid;
78622+int grsec_enable_blackhole;
78623+#ifdef CONFIG_IPV6_MODULE
78624+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
78625+#endif
78626+int grsec_lastack_retries;
78627+int grsec_enable_tpe_all;
78628+int grsec_enable_tpe_invert;
78629+int grsec_enable_socket_all;
78630+kgid_t grsec_socket_all_gid;
78631+int grsec_enable_socket_client;
78632+kgid_t grsec_socket_client_gid;
78633+int grsec_enable_socket_server;
78634+kgid_t grsec_socket_server_gid;
78635+int grsec_resource_logging;
78636+int grsec_disable_privio;
78637+int grsec_enable_log_rwxmaps;
78638+int grsec_lock;
78639+
78640+DEFINE_SPINLOCK(grsec_alert_lock);
78641+unsigned long grsec_alert_wtime = 0;
78642+unsigned long grsec_alert_fyet = 0;
78643+
78644+DEFINE_SPINLOCK(grsec_audit_lock);
78645+
78646+DEFINE_RWLOCK(grsec_exec_file_lock);
78647+
78648+char *gr_shared_page[4];
78649+
78650+char *gr_alert_log_fmt;
78651+char *gr_audit_log_fmt;
78652+char *gr_alert_log_buf;
78653+char *gr_audit_log_buf;
78654+
78655+extern struct gr_arg *gr_usermode;
78656+extern unsigned char *gr_system_salt;
78657+extern unsigned char *gr_system_sum;
78658+
78659+void __init
78660+grsecurity_init(void)
78661+{
78662+ int j;
78663+ /* create the per-cpu shared pages */
78664+
78665+#ifdef CONFIG_X86
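+ /* scrub the BIOS keyboard buffer (BDA 0x41a-0x43d) so that e.g. a
+ * boot-loader password typed at boot doesn't linger in memory */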
78666+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
78667+#endif
78668+
78669+ for (j = 0; j < 4; j++) {
78670+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
78671+ if (gr_shared_page[j] == NULL) {
78672+ panic("Unable to allocate grsecurity shared page");
78673+ return;
78674+ }
78675+ }
78676+
78677+ /* allocate log buffers */
78678+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
78679+ if (!gr_alert_log_fmt) {
78680+ panic("Unable to allocate grsecurity alert log format buffer");
78681+ return;
78682+ }
78683+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
78684+ if (!gr_audit_log_fmt) {
78685+ panic("Unable to allocate grsecurity audit log format buffer");
78686+ return;
78687+ }
78688+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78689+ if (!gr_alert_log_buf) {
78690+ panic("Unable to allocate grsecurity alert log buffer");
78691+ return;
78692+ }
78693+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
78694+ if (!gr_audit_log_buf) {
78695+ panic("Unable to allocate grsecurity audit log buffer");
78696+ return;
78697+ }
78698+
78699+ /* allocate memory for authentication structure */
78700+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
78701+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
78702+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
78703+
78704+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
78705+ panic("Unable to allocate grsecurity authentication structure");
78706+ return;
78707+ }
78708+
78709+#ifdef CONFIG_GRKERNSEC_IO
78710+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
78711+ grsec_disable_privio = 1;
78712+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78713+ grsec_disable_privio = 1;
78714+#else
78715+ grsec_disable_privio = 0;
78716+#endif
78717+#endif
78718+
78719+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78720+ /* for backward compatibility, tpe_invert always defaults to on if
78721+ enabled in the kernel
78722+ */
78723+ grsec_enable_tpe_invert = 1;
78724+#endif
78725+
78726+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78727+#ifndef CONFIG_GRKERNSEC_SYSCTL
78728+ grsec_lock = 1;
78729+#endif
78730+
78731+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78732+ grsec_enable_log_rwxmaps = 1;
78733+#endif
78734+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78735+ grsec_enable_group = 1;
78736+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
78737+#endif
78738+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78739+ grsec_enable_ptrace_readexec = 1;
78740+#endif
78741+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78742+ grsec_enable_chdir = 1;
78743+#endif
78744+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78745+ grsec_enable_harden_ptrace = 1;
78746+#endif
78747+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78748+ grsec_enable_harden_ipc = 1;
78749+#endif
78750+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78751+ grsec_enable_mount = 1;
78752+#endif
78753+#ifdef CONFIG_GRKERNSEC_LINK
78754+ grsec_enable_link = 1;
78755+#endif
78756+#ifdef CONFIG_GRKERNSEC_BRUTE
78757+ grsec_enable_brute = 1;
78758+#endif
78759+#ifdef CONFIG_GRKERNSEC_DMESG
78760+ grsec_enable_dmesg = 1;
78761+#endif
78762+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78763+ grsec_enable_blackhole = 1;
78764+ grsec_lastack_retries = 4;
78765+#endif
78766+#ifdef CONFIG_GRKERNSEC_FIFO
78767+ grsec_enable_fifo = 1;
78768+#endif
78769+#ifdef CONFIG_GRKERNSEC_EXECLOG
78770+ grsec_enable_execlog = 1;
78771+#endif
78772+#ifdef CONFIG_GRKERNSEC_SETXID
78773+ grsec_enable_setxid = 1;
78774+#endif
78775+#ifdef CONFIG_GRKERNSEC_SIGNAL
78776+ grsec_enable_signal = 1;
78777+#endif
78778+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78779+ grsec_enable_forkfail = 1;
78780+#endif
78781+#ifdef CONFIG_GRKERNSEC_TIME
78782+ grsec_enable_time = 1;
78783+#endif
78784+#ifdef CONFIG_GRKERNSEC_RESLOG
78785+ grsec_resource_logging = 1;
78786+#endif
78787+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78788+ grsec_enable_chroot_findtask = 1;
78789+#endif
78790+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78791+ grsec_enable_chroot_unix = 1;
78792+#endif
78793+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78794+ grsec_enable_chroot_mount = 1;
78795+#endif
78796+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78797+ grsec_enable_chroot_fchdir = 1;
78798+#endif
78799+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78800+ grsec_enable_chroot_shmat = 1;
78801+#endif
78802+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78803+ grsec_enable_audit_ptrace = 1;
78804+#endif
78805+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78806+ grsec_enable_chroot_double = 1;
78807+#endif
78808+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78809+ grsec_enable_chroot_pivot = 1;
78810+#endif
78811+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78812+ grsec_enable_chroot_chdir = 1;
78813+#endif
78814+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78815+ grsec_enable_chroot_chmod = 1;
78816+#endif
78817+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78818+ grsec_enable_chroot_mknod = 1;
78819+#endif
78820+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78821+ grsec_enable_chroot_nice = 1;
78822+#endif
78823+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78824+ grsec_enable_chroot_execlog = 1;
78825+#endif
78826+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78827+ grsec_enable_chroot_caps = 1;
78828+#endif
78829+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78830+ grsec_enable_chroot_rename = 1;
78831+#endif
78832+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78833+ grsec_enable_chroot_sysctl = 1;
78834+#endif
78835+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78836+ grsec_enable_symlinkown = 1;
78837+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
78838+#endif
78839+#ifdef CONFIG_GRKERNSEC_TPE
78840+ grsec_enable_tpe = 1;
78841+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
78842+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78843+ grsec_enable_tpe_all = 1;
78844+#endif
78845+#endif
78846+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78847+ grsec_enable_socket_all = 1;
78848+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
78849+#endif
78850+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78851+ grsec_enable_socket_client = 1;
78852+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
78853+#endif
78854+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78855+ grsec_enable_socket_server = 1;
78856+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
78857+#endif
78858+#endif
78859+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
78860+ grsec_deny_new_usb = 1;
78861+#endif
78862+
78863+ return;
78864+}
78865diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
78866new file mode 100644
78867index 0000000..1773300
78868--- /dev/null
78869+++ b/grsecurity/grsec_ipc.c
78870@@ -0,0 +1,48 @@
78871+#include <linux/kernel.h>
78872+#include <linux/mm.h>
78873+#include <linux/sched.h>
78874+#include <linux/file.h>
78875+#include <linux/ipc.h>
78876+#include <linux/ipc_namespace.h>
78877+#include <linux/grsecurity.h>
78878+#include <linux/grinternal.h>
78879+
78880+int
78881+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
78882+{
78883+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78884+ int write;
78885+ int orig_granted_mode;
78886+ kuid_t euid;
78887+ kgid_t egid;
78888+
78889+ if (!grsec_enable_harden_ipc)
78890+ return 1;
78891+
78892+ euid = current_euid();
78893+ egid = current_egid();
78894+
78895+ write = requested_mode & 00002;
78896+ orig_granted_mode = ipcp->mode;
78897+
78898+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
78899+ orig_granted_mode >>= 6;
78900+ else {
78901+ /* world bits set likely means misconfigured permissions: restrict access to the owner */
78902+ if (orig_granted_mode & 0007)
78903+ orig_granted_mode = 0;
78904+ /* otherwise do an egid-only check */
78905+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
78906+ orig_granted_mode >>= 3;
78907+ /* otherwise, no access */
78908+ else
78909+ orig_granted_mode = 0;
78910+ }
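+ /* deny only when the request passes the kernel's own check
+ * (granted_mode) but fails against the owner/group-restricted mode
+ * computed above, unless the caller holds CAP_IPC_OWNER */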
78911+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
78912+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
78913+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
78914+ return 0;
78915+ }
78916+#endif
78917+ return 1;
78918+}
78919diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
78920new file mode 100644
78921index 0000000..5e05e20
78922--- /dev/null
78923+++ b/grsecurity/grsec_link.c
78924@@ -0,0 +1,58 @@
78925+#include <linux/kernel.h>
78926+#include <linux/sched.h>
78927+#include <linux/fs.h>
78928+#include <linux/file.h>
78929+#include <linux/grinternal.h>
78930+
78931+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
78932+{
78933+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78934+ const struct inode *link_inode = link->dentry->d_inode;
78935+
78936+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
78937+ /* ignore root-owned links, e.g. /proc/self */
78938+ gr_is_global_nonroot(link_inode->i_uid) && target &&
78939+ !uid_eq(link_inode->i_uid, target->i_uid)) {
78940+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
78941+ return 1;
78942+ }
78943+#endif
78944+ return 0;
78945+}
78946+
78947+int
78948+gr_handle_follow_link(const struct inode *parent,
78949+ const struct inode *inode,
78950+ const struct dentry *dentry, const struct vfsmount *mnt)
78951+{
78952+#ifdef CONFIG_GRKERNSEC_LINK
78953+ const struct cred *cred = current_cred();
78954+
78955+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
78956+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
78957+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
78958+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
78959+ return -EACCES;
78960+ }
78961+#endif
78962+ return 0;
78963+}
78964+
78965+int
78966+gr_handle_hardlink(const struct dentry *dentry,
78967+ const struct vfsmount *mnt,
78968+ struct inode *inode, const int mode, const struct filename *to)
78969+{
78970+#ifdef CONFIG_GRKERNSEC_LINK
78971+ const struct cred *cred = current_cred();
78972+
78973+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
78974+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
78975+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
78976+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
78977+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
78978+ return -EPERM;
78979+ }
78980+#endif
78981+ return 0;
78982+}
78983diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
78984new file mode 100644
78985index 0000000..dbe0a6b
78986--- /dev/null
78987+++ b/grsecurity/grsec_log.c
78988@@ -0,0 +1,341 @@
78989+#include <linux/kernel.h>
78990+#include <linux/sched.h>
78991+#include <linux/file.h>
78992+#include <linux/tty.h>
78993+#include <linux/fs.h>
78994+#include <linux/mm.h>
78995+#include <linux/grinternal.h>
78996+
78997+#ifdef CONFIG_TREE_PREEMPT_RCU
78998+#define DISABLE_PREEMPT() preempt_disable()
78999+#define ENABLE_PREEMPT() preempt_enable()
79000+#else
79001+#define DISABLE_PREEMPT()
79002+#define ENABLE_PREEMPT()
79003+#endif
79004+
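+/* take every lock the log formatters may need; alerts and audits use
+ * separate spinlocks so audit logging doesn't contend with alert
+ * flood control */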
79005+#define BEGIN_LOCKS(x) \
79006+ DISABLE_PREEMPT(); \
79007+ rcu_read_lock(); \
79008+ read_lock(&tasklist_lock); \
79009+ read_lock(&grsec_exec_file_lock); \
79010+ if (x != GR_DO_AUDIT) \
79011+ spin_lock(&grsec_alert_lock); \
79012+ else \
79013+ spin_lock(&grsec_audit_lock)
79014+
79015+#define END_LOCKS(x) \
79016+ if (x != GR_DO_AUDIT) \
79017+ spin_unlock(&grsec_alert_lock); \
79018+ else \
79019+ spin_unlock(&grsec_audit_lock); \
79020+ read_unlock(&grsec_exec_file_lock); \
79021+ read_unlock(&tasklist_lock); \
79022+ rcu_read_unlock(); \
79023+ ENABLE_PREEMPT(); \
79024+ if (x == GR_DONT_AUDIT) \
79025+ gr_handle_alertkill(current)
79026+
79027+enum {
79028+ FLOODING,
79029+ NO_FLOODING
79030+};
79031+
79032+extern char *gr_alert_log_fmt;
79033+extern char *gr_audit_log_fmt;
79034+extern char *gr_alert_log_buf;
79035+extern char *gr_audit_log_buf;
79036+
79037+static int gr_log_start(int audit)
79038+{
79039+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
79040+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
79041+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79042+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
79043+ unsigned long curr_secs = get_seconds();
79044+
79045+ if (audit == GR_DO_AUDIT)
79046+ goto set_fmt;
79047+
79048+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
79049+ grsec_alert_wtime = curr_secs;
79050+ grsec_alert_fyet = 0;
79051+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
79052+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
79053+ grsec_alert_fyet++;
79054+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
79055+ grsec_alert_wtime = curr_secs;
79056+ grsec_alert_fyet++;
79057+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
79058+ return FLOODING;
79059+ }
79060+ else return FLOODING;
79061+
79062+set_fmt:
79063+#endif
79064+ memset(buf, 0, PAGE_SIZE);
79065+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
79066+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
79067+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
79068+ } else if (current->signal->curr_ip) {
79069+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
79070+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
79071+ } else if (gr_acl_is_enabled()) {
79072+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
79073+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
79074+ } else {
79075+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
79076+ strcpy(buf, fmt);
79077+ }
79078+
79079+ return NO_FLOODING;
79080+}
79081+
79082+static void gr_log_middle(int audit, const char *msg, va_list ap)
79083+ __attribute__ ((format (printf, 2, 0)));
79084+
79085+static void gr_log_middle(int audit, const char *msg, va_list ap)
79086+{
79087+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79088+ unsigned int len = strlen(buf);
79089+
79090+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
79091+
79092+ return;
79093+}
79094+
79095+static void gr_log_middle_varargs(int audit, const char *msg, ...)
79096+ __attribute__ ((format (printf, 2, 3)));
79097+
79098+static void gr_log_middle_varargs(int audit, const char *msg, ...)
79099+{
79100+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79101+ unsigned int len = strlen(buf);
79102+ va_list ap;
79103+
79104+ va_start(ap, msg);
79105+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
79106+ va_end(ap);
79107+
79108+ return;
79109+}
79110+
79111+static void gr_log_end(int audit, int append_default)
79112+{
79113+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
79114+ if (append_default) {
79115+ struct task_struct *task = current;
79116+ struct task_struct *parent = task->real_parent;
79117+ const struct cred *cred = __task_cred(task);
79118+ const struct cred *pcred = __task_cred(parent);
79119+ unsigned int len = strlen(buf);
79120+
79121+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79122+ }
79123+
79124+ printk("%s\n", buf);
79125+
79126+ return;
79127+}
79128+
79129+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
79130+{
79131+ int logtype;
79132+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
79133+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
79134+ void *voidptr = NULL;
79135+ int num1 = 0, num2 = 0;
79136+ unsigned long ulong1 = 0, ulong2 = 0;
79137+ struct dentry *dentry = NULL;
79138+ struct vfsmount *mnt = NULL;
79139+ struct file *file = NULL;
79140+ struct task_struct *task = NULL;
79141+ struct vm_area_struct *vma = NULL;
79142+ const struct cred *cred, *pcred;
79143+ va_list ap;
79144+
79145+ BEGIN_LOCKS(audit);
79146+ logtype = gr_log_start(audit);
79147+ if (logtype == FLOODING) {
79148+ END_LOCKS(audit);
79149+ return;
79150+ }
79151+ va_start(ap, argtypes);
79152+ switch (argtypes) {
79153+ case GR_TTYSNIFF:
79154+ task = va_arg(ap, struct task_struct *);
79155+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
79156+ break;
79157+ case GR_SYSCTL_HIDDEN:
79158+ str1 = va_arg(ap, char *);
79159+ gr_log_middle_varargs(audit, msg, result, str1);
79160+ break;
79161+ case GR_RBAC:
79162+ dentry = va_arg(ap, struct dentry *);
79163+ mnt = va_arg(ap, struct vfsmount *);
79164+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
79165+ break;
79166+ case GR_RBAC_STR:
79167+ dentry = va_arg(ap, struct dentry *);
79168+ mnt = va_arg(ap, struct vfsmount *);
79169+ str1 = va_arg(ap, char *);
79170+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
79171+ break;
79172+ case GR_STR_RBAC:
79173+ str1 = va_arg(ap, char *);
79174+ dentry = va_arg(ap, struct dentry *);
79175+ mnt = va_arg(ap, struct vfsmount *);
79176+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
79177+ break;
79178+ case GR_RBAC_MODE2:
79179+ dentry = va_arg(ap, struct dentry *);
79180+ mnt = va_arg(ap, struct vfsmount *);
79181+ str1 = va_arg(ap, char *);
79182+ str2 = va_arg(ap, char *);
79183+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
79184+ break;
79185+ case GR_RBAC_MODE3:
79186+ dentry = va_arg(ap, struct dentry *);
79187+ mnt = va_arg(ap, struct vfsmount *);
79188+ str1 = va_arg(ap, char *);
79189+ str2 = va_arg(ap, char *);
79190+ str3 = va_arg(ap, char *);
79191+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
79192+ break;
79193+ case GR_FILENAME:
79194+ dentry = va_arg(ap, struct dentry *);
79195+ mnt = va_arg(ap, struct vfsmount *);
79196+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
79197+ break;
79198+ case GR_STR_FILENAME:
79199+ str1 = va_arg(ap, char *);
79200+ dentry = va_arg(ap, struct dentry *);
79201+ mnt = va_arg(ap, struct vfsmount *);
79202+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
79203+ break;
79204+ case GR_FILENAME_STR:
79205+ dentry = va_arg(ap, struct dentry *);
79206+ mnt = va_arg(ap, struct vfsmount *);
79207+ str1 = va_arg(ap, char *);
79208+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
79209+ break;
79210+ case GR_FILENAME_TWO_INT:
79211+ dentry = va_arg(ap, struct dentry *);
79212+ mnt = va_arg(ap, struct vfsmount *);
79213+ num1 = va_arg(ap, int);
79214+ num2 = va_arg(ap, int);
79215+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
79216+ break;
79217+ case GR_FILENAME_TWO_INT_STR:
79218+ dentry = va_arg(ap, struct dentry *);
79219+ mnt = va_arg(ap, struct vfsmount *);
79220+ num1 = va_arg(ap, int);
79221+ num2 = va_arg(ap, int);
79222+ str1 = va_arg(ap, char *);
79223+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
79224+ break;
79225+ case GR_TEXTREL:
79226+ file = va_arg(ap, struct file *);
79227+ ulong1 = va_arg(ap, unsigned long);
79228+ ulong2 = va_arg(ap, unsigned long);
79229+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
79230+ break;
79231+ case GR_PTRACE:
79232+ task = va_arg(ap, struct task_struct *);
79233+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
79234+ break;
79235+ case GR_RESOURCE:
79236+ task = va_arg(ap, struct task_struct *);
79237+ cred = __task_cred(task);
79238+ pcred = __task_cred(task->real_parent);
79239+ ulong1 = va_arg(ap, unsigned long);
79240+ str1 = va_arg(ap, char *);
79241+ ulong2 = va_arg(ap, unsigned long);
79242+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79243+ break;
79244+ case GR_CAP:
79245+ task = va_arg(ap, struct task_struct *);
79246+ cred = __task_cred(task);
79247+ pcred = __task_cred(task->real_parent);
79248+ str1 = va_arg(ap, char *);
79249+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79250+ break;
79251+ case GR_SIG:
79252+ str1 = va_arg(ap, char *);
79253+ voidptr = va_arg(ap, void *);
79254+ gr_log_middle_varargs(audit, msg, str1, voidptr);
79255+ break;
79256+ case GR_SIG2:
79257+ task = va_arg(ap, struct task_struct *);
79258+ cred = __task_cred(task);
79259+ pcred = __task_cred(task->real_parent);
79260+ num1 = va_arg(ap, int);
79261+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79262+ break;
79263+ case GR_CRASH1:
79264+ task = va_arg(ap, struct task_struct *);
79265+ cred = __task_cred(task);
79266+ pcred = __task_cred(task->real_parent);
79267+ ulong1 = va_arg(ap, unsigned long);
79268+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
79269+ break;
79270+ case GR_CRASH2:
79271+ task = va_arg(ap, struct task_struct *);
79272+ cred = __task_cred(task);
79273+ pcred = __task_cred(task->real_parent);
79274+ ulong1 = va_arg(ap, unsigned long);
79275+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
79276+ break;
79277+ case GR_RWXMAP:
79278+ file = va_arg(ap, struct file *);
79279+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
79280+ break;
79281+ case GR_RWXMAPVMA:
79282+ vma = va_arg(ap, struct vm_area_struct *);
79283+ if (vma->vm_file)
79284+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
79285+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
79286+ str1 = "<stack>";
79287+ else if (vma->vm_start <= current->mm->brk &&
79288+ vma->vm_end >= current->mm->start_brk)
79289+ str1 = "<heap>";
79290+ else
79291+ str1 = "<anonymous mapping>";
79292+ gr_log_middle_varargs(audit, msg, str1);
79293+ break;
79294+ case GR_PSACCT:
79295+ {
79296+ unsigned int wday, cday;
79297+ __u8 whr, chr;
79298+ __u8 wmin, cmin;
79299+ __u8 wsec, csec;
79300+ char cur_tty[64] = { 0 };
79301+ char parent_tty[64] = { 0 };
79302+
79303+ task = va_arg(ap, struct task_struct *);
79304+ wday = va_arg(ap, unsigned int);
79305+ cday = va_arg(ap, unsigned int);
79306+ whr = va_arg(ap, int);
79307+ chr = va_arg(ap, int);
79308+ wmin = va_arg(ap, int);
79309+ cmin = va_arg(ap, int);
79310+ wsec = va_arg(ap, int);
79311+ csec = va_arg(ap, int);
79312+ ulong1 = va_arg(ap, unsigned long);
79313+ cred = __task_cred(task);
79314+ pcred = __task_cred(task->real_parent);
79315+
79316+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
79317+ }
79318+ break;
79319+ default:
79320+ gr_log_middle(audit, msg, ap);
79321+ }
79322+ va_end(ap);
79323+ // these don't need DEFAULTSECARGS printed on the end
79324+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
79325+ gr_log_end(audit, 0);
79326+ else
79327+ gr_log_end(audit, 1);
79328+ END_LOCKS(audit);
79329+}
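
[Editor's sketch] gr_log_start() implements a fixed-window rate limit: up to CONFIG_GRKERNSEC_FLOODBURST alerts per CONFIG_GRKERNSEC_FLOODTIME-second window, plus a single "logging disabled" notice when the burst is exhausted. A minimal standalone sketch of that three-way branch, with made-up constants in place of the Kconfig values:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10  /* stand-in for CONFIG_GRKERNSEC_FLOODTIME  */
#define FLOODBURST 6   /* stand-in for CONFIG_GRKERNSEC_FLOODBURST */

static time_t   window_start;  /* plays grsec_alert_wtime */
static unsigned burst_count;   /* plays grsec_alert_fyet  */

/* Returns true when an alert may be emitted, false when suppressed. */
bool alert_allowed(void)
{
    time_t now = time(NULL);

    if (!window_start || now > window_start + FLOODTIME) {
        window_start = now;          /* open a fresh window */
        burst_count = 0;
        return true;
    }
    if (burst_count < FLOODBURST) {  /* still within the burst */
        burst_count++;
        return true;
    }
    if (burst_count == FLOODBURST) { /* announce suppression once */
        window_start = now;          /* and restart the quiet period */
        burst_count++;
        printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
    }
    return false;                    /* FLOODING */
}

Audit messages (GR_DO_AUDIT) bypass the limiter entirely, which is why gr_log_start() jumps straight to set_fmt for them.
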
79330diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
79331new file mode 100644
79332index 0000000..0e39d8c
79333--- /dev/null
79334+++ b/grsecurity/grsec_mem.c
79335@@ -0,0 +1,48 @@
79336+#include <linux/kernel.h>
79337+#include <linux/sched.h>
79338+#include <linux/mm.h>
79339+#include <linux/mman.h>
79340+#include <linux/module.h>
79341+#include <linux/grinternal.h>
79342+
79343+void gr_handle_msr_write(void)
79344+{
79345+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
79346+ return;
79347+}
79348+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
79349+
79350+void
79351+gr_handle_ioperm(void)
79352+{
79353+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
79354+ return;
79355+}
79356+
79357+void
79358+gr_handle_iopl(void)
79359+{
79360+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
79361+ return;
79362+}
79363+
79364+void
79365+gr_handle_mem_readwrite(u64 from, u64 to)
79366+{
79367+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
79368+ return;
79369+}
79370+
79371+void
79372+gr_handle_vm86(void)
79373+{
79374+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
79375+ return;
79376+}
79377+
79378+void
79379+gr_log_badprocpid(const char *entry)
79380+{
79381+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
79382+ return;
79383+}
79384diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
79385new file mode 100644
79386index 0000000..6f9eb73
79387--- /dev/null
79388+++ b/grsecurity/grsec_mount.c
79389@@ -0,0 +1,65 @@
79390+#include <linux/kernel.h>
79391+#include <linux/sched.h>
79392+#include <linux/mount.h>
79393+#include <linux/major.h>
79394+#include <linux/grsecurity.h>
79395+#include <linux/grinternal.h>
79396+
79397+void
79398+gr_log_remount(const char *devname, const int retval)
79399+{
79400+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79401+ if (grsec_enable_mount && (retval >= 0))
79402+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
79403+#endif
79404+ return;
79405+}
79406+
79407+void
79408+gr_log_unmount(const char *devname, const int retval)
79409+{
79410+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79411+ if (grsec_enable_mount && (retval >= 0))
79412+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
79413+#endif
79414+ return;
79415+}
79416+
79417+void
79418+gr_log_mount(const char *from, struct path *to, const int retval)
79419+{
79420+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79421+ if (grsec_enable_mount && (retval >= 0))
79422+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
79423+#endif
79424+ return;
79425+}
79426+
79427+int
79428+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
79429+{
79430+#ifdef CONFIG_GRKERNSEC_ROFS
79431+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
79432+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
79433+ return -EPERM;
79434+ } else
79435+ return 0;
79436+#endif
79437+ return 0;
79438+}
79439+
79440+int
79441+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
79442+{
79443+#ifdef CONFIG_GRKERNSEC_ROFS
79444+ struct inode *inode = dentry->d_inode;
79445+
79446+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
79447+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
79448+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
79449+ return -EPERM;
79450+ } else
79451+ return 0;
79452+#endif
79453+ return 0;
79454+}
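
[Editor's sketch] gr_handle_rofs_blockwrite() is the half of the ROFS feature that keeps a read-only mount honest: with the mount itself forced read-only, the remaining bypass is writing the underlying device node directly. A userspace restatement of the test, assuming MAY_WRITE's kernel value of 2 and the raw-device major from linux/major.h:

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

#define MAY_WRITE 2     /* kernel permission-mask bit */
#define RAW_MAJOR 162   /* linux/major.h */

/* Deny write opens on block devices (and raw char devices, which give
 * the same access) while the romount_protect sysctl is enabled. */
bool deny_rofs_blockwrite(bool rofs_enabled, int acc_mode,
                          const struct stat *st)
{
    bool is_block = S_ISBLK(st->st_mode);
    bool is_raw   = S_ISCHR(st->st_mode) && major(st->st_rdev) == RAW_MAJOR;

    return rofs_enabled && (acc_mode & MAY_WRITE) && (is_block || is_raw);
}
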
79455diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
79456new file mode 100644
79457index 0000000..6ee9d50
79458--- /dev/null
79459+++ b/grsecurity/grsec_pax.c
79460@@ -0,0 +1,45 @@
79461+#include <linux/kernel.h>
79462+#include <linux/sched.h>
79463+#include <linux/mm.h>
79464+#include <linux/file.h>
79465+#include <linux/grinternal.h>
79466+#include <linux/grsecurity.h>
79467+
79468+void
79469+gr_log_textrel(struct vm_area_struct * vma)
79470+{
79471+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79472+ if (grsec_enable_log_rwxmaps)
79473+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
79474+#endif
79475+ return;
79476+}
79477+
79478+void gr_log_ptgnustack(struct file *file)
79479+{
79480+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79481+ if (grsec_enable_log_rwxmaps)
79482+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
79483+#endif
79484+ return;
79485+}
79486+
79487+void
79488+gr_log_rwxmmap(struct file *file)
79489+{
79490+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79491+ if (grsec_enable_log_rwxmaps)
79492+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
79493+#endif
79494+ return;
79495+}
79496+
79497+void
79498+gr_log_rwxmprotect(struct vm_area_struct *vma)
79499+{
79500+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79501+ if (grsec_enable_log_rwxmaps)
79502+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
79503+#endif
79504+ return;
79505+}
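
[Editor's sketch] These hooks only log; what they report are mappings that end up simultaneously writable and executable (the kernel tests VM_WRITE and VM_EXEC on vma->vm_flags). The mmap-level analogue of the condition is simply:

#include <stdbool.h>
#include <sys/mman.h>

/* True for the protection flags that RWXMAP_LOG would report. */
bool is_rwx(int prot)
{
    return (prot & PROT_WRITE) && (prot & PROT_EXEC);
}

The <stack>/<heap>/<anonymous mapping> labels attached to these reports come from the GR_RWXMAPVMA case in grsec_log.c above.
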
79506diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
79507new file mode 100644
79508index 0000000..2005a3a
79509--- /dev/null
79510+++ b/grsecurity/grsec_proc.c
79511@@ -0,0 +1,20 @@
79512+#include <linux/kernel.h>
79513+#include <linux/sched.h>
79514+#include <linux/grsecurity.h>
79515+#include <linux/grinternal.h>
79516+
79517+int gr_proc_is_restricted(void)
79518+{
79519+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79520+ const struct cred *cred = current_cred();
79521+#endif
79522+
79523+#ifdef CONFIG_GRKERNSEC_PROC_USER
79524+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
79525+ return -EACCES;
79526+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79527+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
79528+ return -EACCES;
79529+#endif
79530+ return 0;
79531+}
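
[Editor's sketch] A userspace restatement of this gate, assuming a placeholder gid for the privileged proc group (the real one, grsec_proc_gid, is chosen at configure time):

#include <stdbool.h>
#include <unistd.h>

#define PROC_GID ((gid_t)1001)  /* placeholder for grsec_proc_gid */

/* Crude supplementary-group test; enough for the sketch. */
bool in_group(gid_t gid)
{
    gid_t groups[64];
    int i, n = getgroups(64, groups);

    for (i = 0; i < n; i++)
        if (groups[i] == gid)
            return true;
    return getegid() == gid;
}

/* Analogue of gr_proc_is_restricted(): PROC_USER admits only root,
 * PROC_USERGROUP additionally admits members of PROC_GID. */
bool proc_restricted(bool user_only)
{
    if (geteuid() == 0)
        return false;
    return user_only || !in_group(PROC_GID);
}
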
79532diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
79533new file mode 100644
79534index 0000000..f7f29aa
79535--- /dev/null
79536+++ b/grsecurity/grsec_ptrace.c
79537@@ -0,0 +1,30 @@
79538+#include <linux/kernel.h>
79539+#include <linux/sched.h>
79540+#include <linux/grinternal.h>
79541+#include <linux/security.h>
79542+
79543+void
79544+gr_audit_ptrace(struct task_struct *task)
79545+{
79546+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79547+ if (grsec_enable_audit_ptrace)
79548+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
79549+#endif
79550+ return;
79551+}
79552+
79553+int
79554+gr_ptrace_readexec(struct file *file, int unsafe_flags)
79555+{
79556+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79557+ const struct dentry *dentry = file->f_path.dentry;
79558+ const struct vfsmount *mnt = file->f_path.mnt;
79559+
79560+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
79561+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
79562+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
79563+ return -EACCES;
79564+ }
79565+#endif
79566+ return 0;
79567+}
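
[Editor's sketch] gr_ptrace_readexec() closes a disclosure hole: a tracer attached with LSM_UNSAFE_PTRACE can read the image of any binary its tracee execs, including 711 binaries it could not open itself. The decision, minus the RBAC override (gr_acl_handle_open), reduces to:

#include <stdbool.h>
#include <unistd.h>

/* Deny the traced exec when the would-be tracer lacks read access to
 * the binary; access(2) is a loose stand-in for the kernel's
 * inode_permission() check against the tracer's credentials. */
bool deny_traced_exec(bool readexec_enabled, bool unsafe_ptrace,
                      const char *binary_path)
{
    return readexec_enabled && unsafe_ptrace &&
           access(binary_path, R_OK) != 0;
}
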
79568diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
79569new file mode 100644
79570index 0000000..3860c7e
79571--- /dev/null
79572+++ b/grsecurity/grsec_sig.c
79573@@ -0,0 +1,236 @@
79574+#include <linux/kernel.h>
79575+#include <linux/sched.h>
79576+#include <linux/fs.h>
79577+#include <linux/delay.h>
79578+#include <linux/grsecurity.h>
79579+#include <linux/grinternal.h>
79580+#include <linux/hardirq.h>
79581+
79582+char *signames[] = {
79583+ [SIGSEGV] = "Segmentation fault",
79584+ [SIGILL] = "Illegal instruction",
79585+ [SIGABRT] = "Abort",
79586+ [SIGBUS] = "Invalid alignment/Bus error"
79587+};
79588+
79589+void
79590+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
79591+{
79592+#ifdef CONFIG_GRKERNSEC_SIGNAL
79593+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
79594+ (sig == SIGABRT) || (sig == SIGBUS))) {
79595+ if (task_pid_nr(t) == task_pid_nr(current)) {
79596+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
79597+ } else {
79598+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
79599+ }
79600+ }
79601+#endif
79602+ return;
79603+}
79604+
79605+int
79606+gr_handle_signal(const struct task_struct *p, const int sig)
79607+{
79608+#ifdef CONFIG_GRKERNSEC
79609+ /* ignore the 0 signal for protected task checks */
79610+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
79611+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
79612+ return -EPERM;
79613+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
79614+ return -EPERM;
79615+ }
79616+#endif
79617+ return 0;
79618+}
79619+
79620+#ifdef CONFIG_GRKERNSEC
79621+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
79622+
79623+int gr_fake_force_sig(int sig, struct task_struct *t)
79624+{
79625+ unsigned long int flags;
79626+ int ret, blocked, ignored;
79627+ struct k_sigaction *action;
79628+
79629+ spin_lock_irqsave(&t->sighand->siglock, flags);
79630+ action = &t->sighand->action[sig-1];
79631+ ignored = action->sa.sa_handler == SIG_IGN;
79632+ blocked = sigismember(&t->blocked, sig);
79633+ if (blocked || ignored) {
79634+ action->sa.sa_handler = SIG_DFL;
79635+ if (blocked) {
79636+ sigdelset(&t->blocked, sig);
79637+ recalc_sigpending_and_wake(t);
79638+ }
79639+ }
79640+ if (action->sa.sa_handler == SIG_DFL)
79641+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
79642+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
79643+
79644+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
79645+
79646+ return ret;
79647+}
79648+#endif
79649+
79650+#define GR_USER_BAN_TIME (15 * 60)
79651+#define GR_DAEMON_BRUTE_TIME (30 * 60)
79652+
79653+void gr_handle_brute_attach(int dumpable)
79654+{
79655+#ifdef CONFIG_GRKERNSEC_BRUTE
79656+ struct task_struct *p = current;
79657+ kuid_t uid = GLOBAL_ROOT_UID;
79658+ int daemon = 0;
79659+
79660+ if (!grsec_enable_brute)
79661+ return;
79662+
79663+ rcu_read_lock();
79664+ read_lock(&tasklist_lock);
79665+ read_lock(&grsec_exec_file_lock);
79666+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
79667+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
79668+ p->real_parent->brute = 1;
79669+ daemon = 1;
79670+ } else {
79671+ const struct cred *cred = __task_cred(p), *cred2;
79672+ struct task_struct *tsk, *tsk2;
79673+
79674+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
79675+ struct user_struct *user;
79676+
79677+ uid = cred->uid;
79678+
79679+ /* this is put upon execution past expiration */
79680+ user = find_user(uid);
79681+ if (user == NULL)
79682+ goto unlock;
79683+ user->suid_banned = 1;
79684+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
79685+ if (user->suid_ban_expires == ~0UL)
79686+ user->suid_ban_expires--;
79687+
79688+ /* only kill other threads of the same binary, from the same user */
79689+ do_each_thread(tsk2, tsk) {
79690+ cred2 = __task_cred(tsk);
79691+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
79692+ gr_fake_force_sig(SIGKILL, tsk);
79693+ } while_each_thread(tsk2, tsk);
79694+ }
79695+ }
79696+unlock:
79697+ read_unlock(&grsec_exec_file_lock);
79698+ read_unlock(&tasklist_lock);
79699+ rcu_read_unlock();
79700+
79701+ if (gr_is_global_nonroot(uid))
79702+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
79703+ else if (daemon)
79704+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
79705+
79706+#endif
79707+ return;
79708+}
79709+
79710+void gr_handle_brute_check(void)
79711+{
79712+#ifdef CONFIG_GRKERNSEC_BRUTE
79713+ struct task_struct *p = current;
79714+
79715+ if (unlikely(p->brute)) {
79716+ if (!grsec_enable_brute)
79717+ p->brute = 0;
79718+ else if (time_before(get_seconds(), p->brute_expires))
79719+ msleep(30 * 1000);
79720+ }
79721+#endif
79722+ return;
79723+}
79724+
79725+void gr_handle_kernel_exploit(void)
79726+{
79727+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79728+ const struct cred *cred;
79729+ struct task_struct *tsk, *tsk2;
79730+ struct user_struct *user;
79731+ kuid_t uid;
79732+
79733+ if (in_irq() || in_serving_softirq() || in_nmi())
79734+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
79735+
79736+ uid = current_uid();
79737+
79738+ if (gr_is_global_root(uid))
79739+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
79740+ else {
79741+ /* kill all the processes of this user, hold a reference
79742+ to their creds struct, and prevent them from creating
79743+ another process until system reset
79744+ */
79745+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
79746+ GR_GLOBAL_UID(uid));
79747+ /* we intentionally leak this ref */
79748+ user = get_uid(current->cred->user);
79749+ if (user)
79750+ user->kernel_banned = 1;
79751+
79752+ /* kill all processes of this user */
79753+ read_lock(&tasklist_lock);
79754+ do_each_thread(tsk2, tsk) {
79755+ cred = __task_cred(tsk);
79756+ if (uid_eq(cred->uid, uid))
79757+ gr_fake_force_sig(SIGKILL, tsk);
79758+ } while_each_thread(tsk2, tsk);
79759+ read_unlock(&tasklist_lock);
79760+ }
79761+#endif
79762+}
79763+
79764+#ifdef CONFIG_GRKERNSEC_BRUTE
79765+static bool suid_ban_expired(struct user_struct *user)
79766+{
79767+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
79768+ user->suid_banned = 0;
79769+ user->suid_ban_expires = 0;
79770+ free_uid(user);
79771+ return true;
79772+ }
79773+
79774+ return false;
79775+}
79776+#endif
79777+
79778+int gr_process_kernel_exec_ban(void)
79779+{
79780+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79781+ if (unlikely(current->cred->user->kernel_banned))
79782+ return -EPERM;
79783+#endif
79784+ return 0;
79785+}
79786+
79787+int gr_process_kernel_setuid_ban(struct user_struct *user)
79788+{
79789+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79790+ if (unlikely(user->kernel_banned))
79791+ gr_fake_force_sig(SIGKILL, current);
79792+#endif
79793+ return 0;
79794+}
79795+
79796+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
79797+{
79798+#ifdef CONFIG_GRKERNSEC_BRUTE
79799+ struct user_struct *user = current->cred->user;
79800+ if (unlikely(user->suid_banned)) {
79801+ if (suid_ban_expired(user))
79802+ return 0;
79803+ /* disallow execution of suid binaries only */
79804+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
79805+ return -EPERM;
79806+ }
79807+#endif
79808+ return 0;
79809+}
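
[Editor's sketch] The brute-force response has two arms: crashing suid binaries earn their user a 15-minute exec ban (GR_USER_BAN_TIME), while crashing forked daemons cost the parent a 30-second msleep per respawn for 30 minutes (GR_DAEMON_BRUTE_TIME). The ban bookkeeping reserves ~0UL to mean "permanent" (the kernel-exploit lockout), which explains the decrement after the expiry computation. A standalone sketch of that arithmetic:

#include <stdbool.h>
#include <time.h>

#define BAN_TIME (15 * 60)  /* GR_USER_BAN_TIME */

struct user_ban {
    bool banned;
    unsigned long ban_expires;  /* 0 = none, ~0UL = until reboot */
};

void impose_ban(struct user_ban *u)
{
    u->banned = true;
    u->ban_expires = (unsigned long)time(NULL) + BAN_TIME;
    if (u->ban_expires == ~0UL)  /* avoid colliding with "permanent" */
        u->ban_expires--;
}

/* Mirrors suid_ban_expired(): clears the ban once the window passes,
 * leaving ~0UL bans in place forever. */
bool ban_expired(struct user_ban *u)
{
    if (u->ban_expires != ~0UL &&
        (unsigned long)time(NULL) >= u->ban_expires) {
        u->banned = false;
        u->ban_expires = 0;
        return true;
    }
    return false;
}
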
79810diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
79811new file mode 100644
79812index 0000000..a523bd2
79813--- /dev/null
79814+++ b/grsecurity/grsec_sock.c
79815@@ -0,0 +1,244 @@
79816+#include <linux/kernel.h>
79817+#include <linux/module.h>
79818+#include <linux/sched.h>
79819+#include <linux/file.h>
79820+#include <linux/net.h>
79821+#include <linux/in.h>
79822+#include <linux/ip.h>
79823+#include <net/sock.h>
79824+#include <net/inet_sock.h>
79825+#include <linux/grsecurity.h>
79826+#include <linux/grinternal.h>
79827+#include <linux/gracl.h>
79828+
79829+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
79830+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
79831+
79832+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
79833+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
79834+
79835+#ifdef CONFIG_UNIX_MODULE
79836+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
79837+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
79838+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
79839+EXPORT_SYMBOL_GPL(gr_handle_create);
79840+#endif
79841+
79842+#ifdef CONFIG_GRKERNSEC
79843+#define gr_conn_table_size 32749
79844+struct conn_table_entry {
79845+ struct conn_table_entry *next;
79846+ struct signal_struct *sig;
79847+};
79848+
79849+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
79850+DEFINE_SPINLOCK(gr_conn_table_lock);
79851+
79852+extern const char * gr_socktype_to_name(unsigned char type);
79853+extern const char * gr_proto_to_name(unsigned char proto);
79854+extern const char * gr_sockfamily_to_name(unsigned char family);
79855+
79856+static int
79857+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
79858+{
79859+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
79860+}
79861+
79862+static int
79863+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
79864+ __u16 sport, __u16 dport)
79865+{
79866+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
79867+ sig->gr_sport == sport && sig->gr_dport == dport))
79868+ return 1;
79869+ else
79870+ return 0;
79871+}
79872+
79873+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
79874+{
79875+ struct conn_table_entry **match;
79876+ unsigned int index;
79877+
79878+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79879+ sig->gr_sport, sig->gr_dport,
79880+ gr_conn_table_size);
79881+
79882+ newent->sig = sig;
79883+
79884+ match = &gr_conn_table[index];
79885+ newent->next = *match;
79886+ *match = newent;
79887+
79888+ return;
79889+}
79890+
79891+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
79892+{
79893+ struct conn_table_entry *match, *last = NULL;
79894+ unsigned int index;
79895+
79896+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79897+ sig->gr_sport, sig->gr_dport,
79898+ gr_conn_table_size);
79899+
79900+ match = gr_conn_table[index];
79901+ while (match && !conn_match(match->sig,
79902+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
79903+ sig->gr_dport)) {
79904+ last = match;
79905+ match = match->next;
79906+ }
79907+
79908+ if (match) {
79909+ if (last)
79910+ last->next = match->next;
79911+ else
79912+ gr_conn_table[index] = NULL;
79913+ kfree(match);
79914+ }
79915+
79916+ return;
79917+}
79918+
79919+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
79920+ __u16 sport, __u16 dport)
79921+{
79922+ struct conn_table_entry *match;
79923+ unsigned int index;
79924+
79925+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
79926+
79927+ match = gr_conn_table[index];
79928+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
79929+ match = match->next;
79930+
79931+ if (match)
79932+ return match->sig;
79933+ else
79934+ return NULL;
79935+}
79936+
79937+#endif
79938+
79939+void gr_update_task_in_ip_table(const struct inet_sock *inet)
79940+{
79941+#ifdef CONFIG_GRKERNSEC
79942+ struct signal_struct *sig = current->signal;
79943+ struct conn_table_entry *newent;
79944+
79945+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
79946+ if (newent == NULL)
79947+ return;
79948+ /* no bh lock needed since we are called with bh disabled */
79949+ spin_lock(&gr_conn_table_lock);
79950+ gr_del_task_from_ip_table_nolock(sig);
79951+ sig->gr_saddr = inet->inet_rcv_saddr;
79952+ sig->gr_daddr = inet->inet_daddr;
79953+ sig->gr_sport = inet->inet_sport;
79954+ sig->gr_dport = inet->inet_dport;
79955+ gr_add_to_task_ip_table_nolock(sig, newent);
79956+ spin_unlock(&gr_conn_table_lock);
79957+#endif
79958+ return;
79959+}
79960+
79961+void gr_del_task_from_ip_table(struct task_struct *task)
79962+{
79963+#ifdef CONFIG_GRKERNSEC
79964+ spin_lock_bh(&gr_conn_table_lock);
79965+ gr_del_task_from_ip_table_nolock(task->signal);
79966+ spin_unlock_bh(&gr_conn_table_lock);
79967+#endif
79968+ return;
79969+}
79970+
79971+void
79972+gr_attach_curr_ip(const struct sock *sk)
79973+{
79974+#ifdef CONFIG_GRKERNSEC
79975+ struct signal_struct *p, *set;
79976+ const struct inet_sock *inet = inet_sk(sk);
79977+
79978+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
79979+ return;
79980+
79981+ set = current->signal;
79982+
79983+ spin_lock_bh(&gr_conn_table_lock);
79984+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
79985+ inet->inet_dport, inet->inet_sport);
79986+ if (unlikely(p != NULL)) {
79987+ set->curr_ip = p->curr_ip;
79988+ set->used_accept = 1;
79989+ gr_del_task_from_ip_table_nolock(p);
79990+ spin_unlock_bh(&gr_conn_table_lock);
79991+ return;
79992+ }
79993+ spin_unlock_bh(&gr_conn_table_lock);
79994+
79995+ set->curr_ip = inet->inet_daddr;
79996+ set->used_accept = 1;
79997+#endif
79998+ return;
79999+}
80000+
80001+int
80002+gr_handle_sock_all(const int family, const int type, const int protocol)
80003+{
80004+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
80005+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
80006+ (family != AF_UNIX)) {
80007+ if (family == AF_INET)
80008+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
80009+ else
80010+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
80011+ return -EACCES;
80012+ }
80013+#endif
80014+ return 0;
80015+}
80016+
80017+int
80018+gr_handle_sock_server(const struct sockaddr *sck)
80019+{
80020+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80021+ if (grsec_enable_socket_server &&
80022+ in_group_p(grsec_socket_server_gid) &&
80023+ sck && (sck->sa_family != AF_UNIX) &&
80024+ (sck->sa_family != AF_LOCAL)) {
80025+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
80026+ return -EACCES;
80027+ }
80028+#endif
80029+ return 0;
80030+}
80031+
80032+int
80033+gr_handle_sock_server_other(const struct sock *sck)
80034+{
80035+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80036+ if (grsec_enable_socket_server &&
80037+ in_group_p(grsec_socket_server_gid) &&
80038+ sck && (sck->sk_family != AF_UNIX) &&
80039+ (sck->sk_family != AF_LOCAL)) {
80040+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
80041+ return -EACCES;
80042+ }
80043+#endif
80044+ return 0;
80045+}
80046+
80047+int
80048+gr_handle_sock_client(const struct sockaddr *sck)
80049+{
80050+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
80051+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
80052+ sck && (sck->sa_family != AF_UNIX) &&
80053+ (sck->sa_family != AF_LOCAL)) {
80054+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
80055+ return -EACCES;
80056+ }
80057+#endif
80058+ return 0;
80059+}
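
[Editor's sketch] gr_conn_table is a plain chained hash keyed on the TCP 4-tuple: on connection setup the task's signal_struct is filed under (saddr, daddr, sport, dport), and gr_attach_curr_ip() looks the accepted socket up with the endpoints swapped so the accepting task inherits the peer's curr_ip for later log lines. The hash itself, reproduced as a runnable toy with the same prime bucket count of 32749:

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 32749  /* gr_conn_table_size */

/* Same mix as conn_hash(): sum the tuple with the ports shifted into
 * distinct byte lanes, then reduce modulo a prime. */
unsigned conn_hash(uint32_t saddr, uint32_t daddr,
                   uint16_t sport, uint16_t dport)
{
    return (daddr + saddr + ((uint32_t)sport << 8) +
            ((uint32_t)dport << 16)) % TABLE_SIZE;
}

int main(void)
{
    /* e.g. 10.0.0.1:40000 -> 10.0.0.2:443 */
    printf("bucket %u of %u\n",
           conn_hash(0x0a000001, 0x0a000002, 40000, 443), TABLE_SIZE);
    return 0;
}
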
80060diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
80061new file mode 100644
80062index 0000000..cce889e
80063--- /dev/null
80064+++ b/grsecurity/grsec_sysctl.c
80065@@ -0,0 +1,488 @@
80066+#include <linux/kernel.h>
80067+#include <linux/sched.h>
80068+#include <linux/sysctl.h>
80069+#include <linux/grsecurity.h>
80070+#include <linux/grinternal.h>
80071+
80072+int
80073+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
80074+{
80075+#ifdef CONFIG_GRKERNSEC_SYSCTL
80076+ if (dirname == NULL || name == NULL)
80077+ return 0;
80078+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
80079+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
80080+ return -EACCES;
80081+ }
80082+#endif
80083+ return 0;
80084+}
80085+
80086+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
80087+static int __maybe_unused __read_only one = 1;
80088+#endif
80089+
80090+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
80091+ defined(CONFIG_GRKERNSEC_DENYUSB)
80092+struct ctl_table grsecurity_table[] = {
80093+#ifdef CONFIG_GRKERNSEC_SYSCTL
80094+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
80095+#ifdef CONFIG_GRKERNSEC_IO
80096+ {
80097+ .procname = "disable_priv_io",
80098+ .data = &grsec_disable_privio,
80099+ .maxlen = sizeof(int),
80100+ .mode = 0600,
80101+ .proc_handler = &proc_dointvec,
80102+ },
80103+#endif
80104+#endif
80105+#ifdef CONFIG_GRKERNSEC_LINK
80106+ {
80107+ .procname = "linking_restrictions",
80108+ .data = &grsec_enable_link,
80109+ .maxlen = sizeof(int),
80110+ .mode = 0600,
80111+ .proc_handler = &proc_dointvec,
80112+ },
80113+#endif
80114+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
80115+ {
80116+ .procname = "enforce_symlinksifowner",
80117+ .data = &grsec_enable_symlinkown,
80118+ .maxlen = sizeof(int),
80119+ .mode = 0600,
80120+ .proc_handler = &proc_dointvec,
80121+ },
80122+ {
80123+ .procname = "symlinkown_gid",
80124+ .data = &grsec_symlinkown_gid,
80125+ .maxlen = sizeof(int),
80126+ .mode = 0600,
80127+ .proc_handler = &proc_dointvec,
80128+ },
80129+#endif
80130+#ifdef CONFIG_GRKERNSEC_BRUTE
80131+ {
80132+ .procname = "deter_bruteforce",
80133+ .data = &grsec_enable_brute,
80134+ .maxlen = sizeof(int),
80135+ .mode = 0600,
80136+ .proc_handler = &proc_dointvec,
80137+ },
80138+#endif
80139+#ifdef CONFIG_GRKERNSEC_FIFO
80140+ {
80141+ .procname = "fifo_restrictions",
80142+ .data = &grsec_enable_fifo,
80143+ .maxlen = sizeof(int),
80144+ .mode = 0600,
80145+ .proc_handler = &proc_dointvec,
80146+ },
80147+#endif
80148+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
80149+ {
80150+ .procname = "ptrace_readexec",
80151+ .data = &grsec_enable_ptrace_readexec,
80152+ .maxlen = sizeof(int),
80153+ .mode = 0600,
80154+ .proc_handler = &proc_dointvec,
80155+ },
80156+#endif
80157+#ifdef CONFIG_GRKERNSEC_SETXID
80158+ {
80159+ .procname = "consistent_setxid",
80160+ .data = &grsec_enable_setxid,
80161+ .maxlen = sizeof(int),
80162+ .mode = 0600,
80163+ .proc_handler = &proc_dointvec,
80164+ },
80165+#endif
80166+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80167+ {
80168+ .procname = "ip_blackhole",
80169+ .data = &grsec_enable_blackhole,
80170+ .maxlen = sizeof(int),
80171+ .mode = 0600,
80172+ .proc_handler = &proc_dointvec,
80173+ },
80174+ {
80175+ .procname = "lastack_retries",
80176+ .data = &grsec_lastack_retries,
80177+ .maxlen = sizeof(int),
80178+ .mode = 0600,
80179+ .proc_handler = &proc_dointvec,
80180+ },
80181+#endif
80182+#ifdef CONFIG_GRKERNSEC_EXECLOG
80183+ {
80184+ .procname = "exec_logging",
80185+ .data = &grsec_enable_execlog,
80186+ .maxlen = sizeof(int),
80187+ .mode = 0600,
80188+ .proc_handler = &proc_dointvec,
80189+ },
80190+#endif
80191+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
80192+ {
80193+ .procname = "rwxmap_logging",
80194+ .data = &grsec_enable_log_rwxmaps,
80195+ .maxlen = sizeof(int),
80196+ .mode = 0600,
80197+ .proc_handler = &proc_dointvec,
80198+ },
80199+#endif
80200+#ifdef CONFIG_GRKERNSEC_SIGNAL
80201+ {
80202+ .procname = "signal_logging",
80203+ .data = &grsec_enable_signal,
80204+ .maxlen = sizeof(int),
80205+ .mode = 0600,
80206+ .proc_handler = &proc_dointvec,
80207+ },
80208+#endif
80209+#ifdef CONFIG_GRKERNSEC_FORKFAIL
80210+ {
80211+ .procname = "forkfail_logging",
80212+ .data = &grsec_enable_forkfail,
80213+ .maxlen = sizeof(int),
80214+ .mode = 0600,
80215+ .proc_handler = &proc_dointvec,
80216+ },
80217+#endif
80218+#ifdef CONFIG_GRKERNSEC_TIME
80219+ {
80220+ .procname = "timechange_logging",
80221+ .data = &grsec_enable_time,
80222+ .maxlen = sizeof(int),
80223+ .mode = 0600,
80224+ .proc_handler = &proc_dointvec,
80225+ },
80226+#endif
80227+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
80228+ {
80229+ .procname = "chroot_deny_shmat",
80230+ .data = &grsec_enable_chroot_shmat,
80231+ .maxlen = sizeof(int),
80232+ .mode = 0600,
80233+ .proc_handler = &proc_dointvec,
80234+ },
80235+#endif
80236+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80237+ {
80238+ .procname = "chroot_deny_unix",
80239+ .data = &grsec_enable_chroot_unix,
80240+ .maxlen = sizeof(int),
80241+ .mode = 0600,
80242+ .proc_handler = &proc_dointvec,
80243+ },
80244+#endif
80245+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
80246+ {
80247+ .procname = "chroot_deny_mount",
80248+ .data = &grsec_enable_chroot_mount,
80249+ .maxlen = sizeof(int),
80250+ .mode = 0600,
80251+ .proc_handler = &proc_dointvec,
80252+ },
80253+#endif
80254+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
80255+ {
80256+ .procname = "chroot_deny_fchdir",
80257+ .data = &grsec_enable_chroot_fchdir,
80258+ .maxlen = sizeof(int),
80259+ .mode = 0600,
80260+ .proc_handler = &proc_dointvec,
80261+ },
80262+#endif
80263+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
80264+ {
80265+ .procname = "chroot_deny_chroot",
80266+ .data = &grsec_enable_chroot_double,
80267+ .maxlen = sizeof(int),
80268+ .mode = 0600,
80269+ .proc_handler = &proc_dointvec,
80270+ },
80271+#endif
80272+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
80273+ {
80274+ .procname = "chroot_deny_pivot",
80275+ .data = &grsec_enable_chroot_pivot,
80276+ .maxlen = sizeof(int),
80277+ .mode = 0600,
80278+ .proc_handler = &proc_dointvec,
80279+ },
80280+#endif
80281+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
80282+ {
80283+ .procname = "chroot_enforce_chdir",
80284+ .data = &grsec_enable_chroot_chdir,
80285+ .maxlen = sizeof(int),
80286+ .mode = 0600,
80287+ .proc_handler = &proc_dointvec,
80288+ },
80289+#endif
80290+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
80291+ {
80292+ .procname = "chroot_deny_chmod",
80293+ .data = &grsec_enable_chroot_chmod,
80294+ .maxlen = sizeof(int),
80295+ .mode = 0600,
80296+ .proc_handler = &proc_dointvec,
80297+ },
80298+#endif
80299+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
80300+ {
80301+ .procname = "chroot_deny_mknod",
80302+ .data = &grsec_enable_chroot_mknod,
80303+ .maxlen = sizeof(int),
80304+ .mode = 0600,
80305+ .proc_handler = &proc_dointvec,
80306+ },
80307+#endif
80308+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80309+ {
80310+ .procname = "chroot_restrict_nice",
80311+ .data = &grsec_enable_chroot_nice,
80312+ .maxlen = sizeof(int),
80313+ .mode = 0600,
80314+ .proc_handler = &proc_dointvec,
80315+ },
80316+#endif
80317+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
80318+ {
80319+ .procname = "chroot_execlog",
80320+ .data = &grsec_enable_chroot_execlog,
80321+ .maxlen = sizeof(int),
80322+ .mode = 0600,
80323+ .proc_handler = &proc_dointvec,
80324+ },
80325+#endif
80326+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80327+ {
80328+ .procname = "chroot_caps",
80329+ .data = &grsec_enable_chroot_caps,
80330+ .maxlen = sizeof(int),
80331+ .mode = 0600,
80332+ .proc_handler = &proc_dointvec,
80333+ },
80334+#endif
80335+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80336+ {
80337+ .procname = "chroot_deny_bad_rename",
80338+ .data = &grsec_enable_chroot_rename,
80339+ .maxlen = sizeof(int),
80340+ .mode = 0600,
80341+ .proc_handler = &proc_dointvec,
80342+ },
80343+#endif
80344+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
80345+ {
80346+ .procname = "chroot_deny_sysctl",
80347+ .data = &grsec_enable_chroot_sysctl,
80348+ .maxlen = sizeof(int),
80349+ .mode = 0600,
80350+ .proc_handler = &proc_dointvec,
80351+ },
80352+#endif
80353+#ifdef CONFIG_GRKERNSEC_TPE
80354+ {
80355+ .procname = "tpe",
80356+ .data = &grsec_enable_tpe,
80357+ .maxlen = sizeof(int),
80358+ .mode = 0600,
80359+ .proc_handler = &proc_dointvec,
80360+ },
80361+ {
80362+ .procname = "tpe_gid",
80363+ .data = &grsec_tpe_gid,
80364+ .maxlen = sizeof(int),
80365+ .mode = 0600,
80366+ .proc_handler = &proc_dointvec,
80367+ },
80368+#endif
80369+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80370+ {
80371+ .procname = "tpe_invert",
80372+ .data = &grsec_enable_tpe_invert,
80373+ .maxlen = sizeof(int),
80374+ .mode = 0600,
80375+ .proc_handler = &proc_dointvec,
80376+ },
80377+#endif
80378+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80379+ {
80380+ .procname = "tpe_restrict_all",
80381+ .data = &grsec_enable_tpe_all,
80382+ .maxlen = sizeof(int),
80383+ .mode = 0600,
80384+ .proc_handler = &proc_dointvec,
80385+ },
80386+#endif
80387+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
80388+ {
80389+ .procname = "socket_all",
80390+ .data = &grsec_enable_socket_all,
80391+ .maxlen = sizeof(int),
80392+ .mode = 0600,
80393+ .proc_handler = &proc_dointvec,
80394+ },
80395+ {
80396+ .procname = "socket_all_gid",
80397+ .data = &grsec_socket_all_gid,
80398+ .maxlen = sizeof(int),
80399+ .mode = 0600,
80400+ .proc_handler = &proc_dointvec,
80401+ },
80402+#endif
80403+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
80404+ {
80405+ .procname = "socket_client",
80406+ .data = &grsec_enable_socket_client,
80407+ .maxlen = sizeof(int),
80408+ .mode = 0600,
80409+ .proc_handler = &proc_dointvec,
80410+ },
80411+ {
80412+ .procname = "socket_client_gid",
80413+ .data = &grsec_socket_client_gid,
80414+ .maxlen = sizeof(int),
80415+ .mode = 0600,
80416+ .proc_handler = &proc_dointvec,
80417+ },
80418+#endif
80419+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
80420+ {
80421+ .procname = "socket_server",
80422+ .data = &grsec_enable_socket_server,
80423+ .maxlen = sizeof(int),
80424+ .mode = 0600,
80425+ .proc_handler = &proc_dointvec,
80426+ },
80427+ {
80428+ .procname = "socket_server_gid",
80429+ .data = &grsec_socket_server_gid,
80430+ .maxlen = sizeof(int),
80431+ .mode = 0600,
80432+ .proc_handler = &proc_dointvec,
80433+ },
80434+#endif
80435+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
80436+ {
80437+ .procname = "audit_group",
80438+ .data = &grsec_enable_group,
80439+ .maxlen = sizeof(int),
80440+ .mode = 0600,
80441+ .proc_handler = &proc_dointvec,
80442+ },
80443+ {
80444+ .procname = "audit_gid",
80445+ .data = &grsec_audit_gid,
80446+ .maxlen = sizeof(int),
80447+ .mode = 0600,
80448+ .proc_handler = &proc_dointvec,
80449+ },
80450+#endif
80451+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80452+ {
80453+ .procname = "audit_chdir",
80454+ .data = &grsec_enable_chdir,
80455+ .maxlen = sizeof(int),
80456+ .mode = 0600,
80457+ .proc_handler = &proc_dointvec,
80458+ },
80459+#endif
80460+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
80461+ {
80462+ .procname = "audit_mount",
80463+ .data = &grsec_enable_mount,
80464+ .maxlen = sizeof(int),
80465+ .mode = 0600,
80466+ .proc_handler = &proc_dointvec,
80467+ },
80468+#endif
80469+#ifdef CONFIG_GRKERNSEC_DMESG
80470+ {
80471+ .procname = "dmesg",
80472+ .data = &grsec_enable_dmesg,
80473+ .maxlen = sizeof(int),
80474+ .mode = 0600,
80475+ .proc_handler = &proc_dointvec,
80476+ },
80477+#endif
80478+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80479+ {
80480+ .procname = "chroot_findtask",
80481+ .data = &grsec_enable_chroot_findtask,
80482+ .maxlen = sizeof(int),
80483+ .mode = 0600,
80484+ .proc_handler = &proc_dointvec,
80485+ },
80486+#endif
80487+#ifdef CONFIG_GRKERNSEC_RESLOG
80488+ {
80489+ .procname = "resource_logging",
80490+ .data = &grsec_resource_logging,
80491+ .maxlen = sizeof(int),
80492+ .mode = 0600,
80493+ .proc_handler = &proc_dointvec,
80494+ },
80495+#endif
80496+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
80497+ {
80498+ .procname = "audit_ptrace",
80499+ .data = &grsec_enable_audit_ptrace,
80500+ .maxlen = sizeof(int),
80501+ .mode = 0600,
80502+ .proc_handler = &proc_dointvec,
80503+ },
80504+#endif
80505+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
80506+ {
80507+ .procname = "harden_ptrace",
80508+ .data = &grsec_enable_harden_ptrace,
80509+ .maxlen = sizeof(int),
80510+ .mode = 0600,
80511+ .proc_handler = &proc_dointvec,
80512+ },
80513+#endif
80514+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
80515+ {
80516+ .procname = "harden_ipc",
80517+ .data = &grsec_enable_harden_ipc,
80518+ .maxlen = sizeof(int),
80519+ .mode = 0600,
80520+ .proc_handler = &proc_dointvec,
80521+ },
80522+#endif
80523+ {
80524+ .procname = "grsec_lock",
80525+ .data = &grsec_lock,
80526+ .maxlen = sizeof(int),
80527+ .mode = 0600,
80528+ .proc_handler = &proc_dointvec,
80529+ },
80530+#endif
80531+#ifdef CONFIG_GRKERNSEC_ROFS
80532+ {
80533+ .procname = "romount_protect",
80534+ .data = &grsec_enable_rofs,
80535+ .maxlen = sizeof(int),
80536+ .mode = 0600,
80537+ .proc_handler = &proc_dointvec_minmax,
80538+ .extra1 = &one,
80539+ .extra2 = &one,
80540+ },
80541+#endif
80542+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
80543+ {
80544+ .procname = "deny_new_usb",
80545+ .data = &grsec_deny_new_usb,
80546+ .maxlen = sizeof(int),
80547+ .mode = 0600,
80548+ .proc_handler = &proc_dointvec,
80549+ },
80550+#endif
80551+ { }
80552+};
80553+#endif
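
[Editor's sketch] Each entry in this table surfaces as a root-only (mode 0600) file under /proc/sys/kernel/grsecurity/ (the registration itself happens elsewhere in the patch), and once grsec_lock is written to 1, gr_handle_sysctl_mod() at the top of this file rejects further writes to the directory with EACCES. A minimal userspace read of one toggle, assuming a grsecurity kernel and root:

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/grsecurity/deter_bruteforce", "r");
    int val;

    if (!f) {
        perror("fopen");  /* not a grsec kernel, or not root */
        return 1;
    }
    if (fscanf(f, "%d", &val) == 1)
        printf("deter_bruteforce = %d\n", val);
    fclose(f);
    return 0;
}
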
80554diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
80555new file mode 100644
80556index 0000000..61b514e
80557--- /dev/null
80558+++ b/grsecurity/grsec_time.c
80559@@ -0,0 +1,16 @@
80560+#include <linux/kernel.h>
80561+#include <linux/sched.h>
80562+#include <linux/grinternal.h>
80563+#include <linux/module.h>
80564+
80565+void
80566+gr_log_timechange(void)
80567+{
80568+#ifdef CONFIG_GRKERNSEC_TIME
80569+ if (grsec_enable_time)
80570+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
80571+#endif
80572+ return;
80573+}
80574+
80575+EXPORT_SYMBOL_GPL(gr_log_timechange);
80576diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
80577new file mode 100644
80578index 0000000..d1953de
80579--- /dev/null
80580+++ b/grsecurity/grsec_tpe.c
80581@@ -0,0 +1,78 @@
80582+#include <linux/kernel.h>
80583+#include <linux/sched.h>
80584+#include <linux/file.h>
80585+#include <linux/fs.h>
80586+#include <linux/grinternal.h>
80587+
80588+extern int gr_acl_tpe_check(void);
80589+
80590+int
80591+gr_tpe_allow(const struct file *file)
80592+{
80593+#ifdef CONFIG_GRKERNSEC
80594+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
80595+ struct inode *file_inode = file->f_path.dentry->d_inode;
80596+ const struct cred *cred = current_cred();
80597+ char *msg = NULL;
80598+ char *msg2 = NULL;
80599+
80600+ // never restrict root
80601+ if (gr_is_global_root(cred->uid))
80602+ return 1;
80603+
80604+ if (grsec_enable_tpe) {
80605+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
80606+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
80607+ msg = "not being in trusted group";
80608+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
80609+ msg = "being in untrusted group";
80610+#else
80611+ if (in_group_p(grsec_tpe_gid))
80612+ msg = "being in untrusted group";
80613+#endif
80614+ }
80615+ if (!msg && gr_acl_tpe_check())
80616+ msg = "being in untrusted role";
80617+
80618+ // not in any affected group/role
80619+ if (!msg)
80620+ goto next_check;
80621+
80622+ if (gr_is_global_nonroot(inode->i_uid))
80623+ msg2 = "file in non-root-owned directory";
80624+ else if (inode->i_mode & S_IWOTH)
80625+ msg2 = "file in world-writable directory";
80626+ else if (inode->i_mode & S_IWGRP)
80627+ msg2 = "file in group-writable directory";
80628+ else if (file_inode->i_mode & S_IWOTH)
80629+ msg2 = "file is world-writable";
80630+
80631+ if (msg && msg2) {
80632+ char fullmsg[70] = {0};
80633+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
80634+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
80635+ return 0;
80636+ }
80637+ msg = NULL;
80638+next_check:
80639+#ifdef CONFIG_GRKERNSEC_TPE_ALL
80640+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
80641+ return 1;
80642+
80643+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
80644+ msg = "directory not owned by user";
80645+ else if (inode->i_mode & S_IWOTH)
80646+ msg = "file in world-writable directory";
80647+ else if (inode->i_mode & S_IWGRP)
80648+ msg = "file in group-writable directory";
80649+ else if (file_inode->i_mode & S_IWOTH)
80650+ msg = "file is world-writable";
80651+
80652+ if (msg) {
80653+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
80654+ return 0;
80655+ }
80656+#endif
80657+#endif
80658+ return 1;
80659+}
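
[Editor's sketch] The TPE decision runs in two passes: first against the gid/role-based untrusted set (with TPE_INVERT flipping the group's meaning to "trusted"), then, under TPE_ALL, a stricter pass for everyone that also demands the directory be owned by the invoking user. The first pass, collapsed into one userspace predicate over stat(2) data:

#include <stdbool.h>
#include <sys/stat.h>
#include <unistd.h>

/* Analogue of the first gr_tpe_allow() pass: for users in the
 * untrusted set, the binary must live in a root-owned directory that
 * is neither group- nor world-writable, and must not itself be
 * world-writable. */
bool tpe_allow(const struct stat *dir, const struct stat *file,
               bool in_untrusted_set)
{
    if (geteuid() == 0)                      /* never restrict root */
        return true;
    if (!in_untrusted_set)
        return true;
    if (dir->st_uid != 0)                    /* non-root-owned dir  */
        return false;
    if (dir->st_mode & (S_IWGRP | S_IWOTH))  /* writable dir        */
        return false;
    if (file->st_mode & S_IWOTH)             /* world-writable file */
        return false;
    return true;
}
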
80660diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
80661new file mode 100644
80662index 0000000..ae02d8e
80663--- /dev/null
80664+++ b/grsecurity/grsec_usb.c
80665@@ -0,0 +1,15 @@
80666+#include <linux/kernel.h>
80667+#include <linux/grinternal.h>
80668+#include <linux/module.h>
80669+
80670+int gr_handle_new_usb(void)
80671+{
80672+#ifdef CONFIG_GRKERNSEC_DENYUSB
80673+ if (grsec_deny_new_usb) {
80674+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
80675+ return 1;
80676+ }
80677+#endif
80678+ return 0;
80679+}
80680+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
80681diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
80682new file mode 100644
80683index 0000000..158b330
80684--- /dev/null
80685+++ b/grsecurity/grsum.c
80686@@ -0,0 +1,64 @@
80687+#include <linux/err.h>
80688+#include <linux/kernel.h>
80689+#include <linux/sched.h>
80690+#include <linux/mm.h>
80691+#include <linux/scatterlist.h>
80692+#include <linux/crypto.h>
80693+#include <linux/gracl.h>
80694+
80695+
80696+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
80697+#error "crypto and sha256 must be built into the kernel"
80698+#endif
80699+
80700+int
80701+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
80702+{
80703+ struct crypto_hash *tfm;
80704+ struct hash_desc desc;
80705+ struct scatterlist sg[2];
80706+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
80707+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
80708+ unsigned long *sumptr = (unsigned long *)sum;
80709+ int cryptres;
80710+ int retval = 1;
80711+ volatile int mismatched = 0;
80712+ volatile int dummy = 0;
80713+ unsigned int i;
80714+
80715+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
80716+ if (IS_ERR(tfm)) {
80717+ /* should never happen, since sha256 should be built in */
80718+ memset(entry->pw, 0, GR_PW_LEN);
80719+ return 1;
80720+ }
80721+
80722+ sg_init_table(sg, 2);
80723+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
80724+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
80725+
80726+ desc.tfm = tfm;
80727+ desc.flags = 0;
80728+
80729+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
80730+ temp_sum);
80731+
80732+ memset(entry->pw, 0, GR_PW_LEN);
80733+
80734+ if (cryptres)
80735+ goto out;
80736+
80737+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
80738+ if (sumptr[i] != tmpsumptr[i])
80739+ mismatched = 1;
80740+ else
80741+ dummy = 1; // waste a cycle
80742+
80743+ if (!mismatched)
80744+ retval = dummy - 1;
80745+
80746+out:
80747+ crypto_free_hash(tfm);
80748+
80749+ return retval;
80750+}
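
[Editor's sketch] The comparison loop in chkpw() is deliberately constant-time: every word of the two SHA-256 digests is visited whether or not a mismatch has already been found (the volatile dummy keeps the compiler from eliding the balanced branch), so timing does not reveal how long a matching prefix was. The same property is more commonly written branch-free; a standalone equivalent for a 32-byte digest (nwords = 4 on a 64-bit machine):

#include <stdbool.h>
#include <stddef.h>

/* Accumulate the XOR of all words and test once at the end -- no
 * data-dependent early exit, hence no timing side channel. */
bool digests_equal(const unsigned long *a, const unsigned long *b,
                   size_t nwords)
{
    unsigned long diff = 0;
    size_t i;

    for (i = 0; i < nwords; i++)
        diff |= a[i] ^ b[i];
    return diff == 0;
}
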
80751diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
80752index 5bdab6b..9ae82fe 100644
80753--- a/include/asm-generic/4level-fixup.h
80754+++ b/include/asm-generic/4level-fixup.h
80755@@ -14,8 +14,10 @@
80756 #define pmd_alloc(mm, pud, address) \
80757 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
80758 NULL: pmd_offset(pud, address))
80759+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
80760
80761 #define pud_alloc(mm, pgd, address) (pgd)
80762+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
80763 #define pud_offset(pgd, start) (pgd)
80764 #define pud_none(pud) 0
80765 #define pud_bad(pud) 0
80766diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
80767index b7babf0..1e4b4f1 100644
80768--- a/include/asm-generic/atomic-long.h
80769+++ b/include/asm-generic/atomic-long.h
80770@@ -22,6 +22,12 @@
80771
80772 typedef atomic64_t atomic_long_t;
80773
80774+#ifdef CONFIG_PAX_REFCOUNT
80775+typedef atomic64_unchecked_t atomic_long_unchecked_t;
80776+#else
80777+typedef atomic64_t atomic_long_unchecked_t;
80778+#endif
80779+
80780 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
80781
80782 static inline long atomic_long_read(atomic_long_t *l)
80783@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80784 return (long)atomic64_read(v);
80785 }
80786
80787+#ifdef CONFIG_PAX_REFCOUNT
80788+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80789+{
80790+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80791+
80792+ return (long)atomic64_read_unchecked(v);
80793+}
80794+#endif
80795+
80796 static inline void atomic_long_set(atomic_long_t *l, long i)
80797 {
80798 atomic64_t *v = (atomic64_t *)l;
80799@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80800 atomic64_set(v, i);
80801 }
80802
80803+#ifdef CONFIG_PAX_REFCOUNT
80804+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80805+{
80806+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80807+
80808+ atomic64_set_unchecked(v, i);
80809+}
80810+#endif
80811+
80812 static inline void atomic_long_inc(atomic_long_t *l)
80813 {
80814 atomic64_t *v = (atomic64_t *)l;
80815@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80816 atomic64_inc(v);
80817 }
80818
80819+#ifdef CONFIG_PAX_REFCOUNT
80820+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80821+{
80822+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80823+
80824+ atomic64_inc_unchecked(v);
80825+}
80826+#endif
80827+
80828 static inline void atomic_long_dec(atomic_long_t *l)
80829 {
80830 atomic64_t *v = (atomic64_t *)l;
80831@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80832 atomic64_dec(v);
80833 }
80834
80835+#ifdef CONFIG_PAX_REFCOUNT
80836+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80837+{
80838+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80839+
80840+ atomic64_dec_unchecked(v);
80841+}
80842+#endif
80843+
80844 static inline void atomic_long_add(long i, atomic_long_t *l)
80845 {
80846 atomic64_t *v = (atomic64_t *)l;
80847@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80848 atomic64_add(i, v);
80849 }
80850
80851+#ifdef CONFIG_PAX_REFCOUNT
80852+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80853+{
80854+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80855+
80856+ atomic64_add_unchecked(i, v);
80857+}
80858+#endif
80859+
80860 static inline void atomic_long_sub(long i, atomic_long_t *l)
80861 {
80862 atomic64_t *v = (atomic64_t *)l;
80863@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80864 atomic64_sub(i, v);
80865 }
80866
80867+#ifdef CONFIG_PAX_REFCOUNT
80868+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80869+{
80870+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80871+
80872+ atomic64_sub_unchecked(i, v);
80873+}
80874+#endif
80875+
80876 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80877 {
80878 atomic64_t *v = (atomic64_t *)l;
80879@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80880 return atomic64_add_negative(i, v);
80881 }
80882
80883-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80884+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80885 {
80886 atomic64_t *v = (atomic64_t *)l;
80887
80888 return (long)atomic64_add_return(i, v);
80889 }
80890
80891+#ifdef CONFIG_PAX_REFCOUNT
80892+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80893+{
80894+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80895+
80896+ return (long)atomic64_add_return_unchecked(i, v);
80897+}
80898+#endif
80899+
80900 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80901 {
80902 atomic64_t *v = (atomic64_t *)l;
80903@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80904 return (long)atomic64_inc_return(v);
80905 }
80906
80907+#ifdef CONFIG_PAX_REFCOUNT
80908+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80909+{
80910+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80911+
80912+ return (long)atomic64_inc_return_unchecked(v);
80913+}
80914+#endif
80915+
80916 static inline long atomic_long_dec_return(atomic_long_t *l)
80917 {
80918 atomic64_t *v = (atomic64_t *)l;
80919@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80920
80921 typedef atomic_t atomic_long_t;
80922
80923+#ifdef CONFIG_PAX_REFCOUNT
80924+typedef atomic_unchecked_t atomic_long_unchecked_t;
80925+#else
80926+typedef atomic_t atomic_long_unchecked_t;
80927+#endif
80928+
80929 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
80930 static inline long atomic_long_read(atomic_long_t *l)
80931 {
80932@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80933 return (long)atomic_read(v);
80934 }
80935
80936+#ifdef CONFIG_PAX_REFCOUNT
80937+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80938+{
80939+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80940+
80941+ return (long)atomic_read_unchecked(v);
80942+}
80943+#endif
80944+
80945 static inline void atomic_long_set(atomic_long_t *l, long i)
80946 {
80947 atomic_t *v = (atomic_t *)l;
80948@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80949 atomic_set(v, i);
80950 }
80951
80952+#ifdef CONFIG_PAX_REFCOUNT
80953+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80954+{
80955+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80956+
80957+ atomic_set_unchecked(v, i);
80958+}
80959+#endif
80960+
80961 static inline void atomic_long_inc(atomic_long_t *l)
80962 {
80963 atomic_t *v = (atomic_t *)l;
80964@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80965 atomic_inc(v);
80966 }
80967
80968+#ifdef CONFIG_PAX_REFCOUNT
80969+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80970+{
80971+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80972+
80973+ atomic_inc_unchecked(v);
80974+}
80975+#endif
80976+
80977 static inline void atomic_long_dec(atomic_long_t *l)
80978 {
80979 atomic_t *v = (atomic_t *)l;
80980@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80981 atomic_dec(v);
80982 }
80983
80984+#ifdef CONFIG_PAX_REFCOUNT
80985+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80986+{
80987+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80988+
80989+ atomic_dec_unchecked(v);
80990+}
80991+#endif
80992+
80993 static inline void atomic_long_add(long i, atomic_long_t *l)
80994 {
80995 atomic_t *v = (atomic_t *)l;
80996@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80997 atomic_add(i, v);
80998 }
80999
81000+#ifdef CONFIG_PAX_REFCOUNT
81001+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
81002+{
81003+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81004+
81005+ atomic_add_unchecked(i, v);
81006+}
81007+#endif
81008+
81009 static inline void atomic_long_sub(long i, atomic_long_t *l)
81010 {
81011 atomic_t *v = (atomic_t *)l;
81012@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
81013 atomic_sub(i, v);
81014 }
81015
81016+#ifdef CONFIG_PAX_REFCOUNT
81017+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
81018+{
81019+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81020+
81021+ atomic_sub_unchecked(i, v);
81022+}
81023+#endif
81024+
81025 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
81026 {
81027 atomic_t *v = (atomic_t *)l;
81028@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
81029 return atomic_add_negative(i, v);
81030 }
81031
81032-static inline long atomic_long_add_return(long i, atomic_long_t *l)
81033+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
81034 {
81035 atomic_t *v = (atomic_t *)l;
81036
81037 return (long)atomic_add_return(i, v);
81038 }
81039
81040+#ifdef CONFIG_PAX_REFCOUNT
81041+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
81042+{
81043+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81044+
81045+ return (long)atomic_add_return_unchecked(i, v);
81046+}
81048+#endif
81049+
81050 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
81051 {
81052 atomic_t *v = (atomic_t *)l;
81053@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
81054 return (long)atomic_inc_return(v);
81055 }
81056
81057+#ifdef CONFIG_PAX_REFCOUNT
81058+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
81059+{
81060+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
81061+
81062+ return (long)atomic_inc_return_unchecked(v);
81063+}
81064+#endif
81065+
81066 static inline long atomic_long_dec_return(atomic_long_t *l)
81067 {
81068 atomic_t *v = (atomic_t *)l;
81069@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
81070
81071 #endif /* BITS_PER_LONG == 64 */
81072
81073+#ifdef CONFIG_PAX_REFCOUNT
81074+static inline void pax_refcount_needs_these_functions(void)
81075+{
81076+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
81077+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
81078+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
81079+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
81080+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
81081+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
81082+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
81083+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
81084+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
81085+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
81086+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
81087+#ifdef CONFIG_X86
81088+ atomic_clear_mask_unchecked(0, NULL);
81089+ atomic_set_mask_unchecked(0, NULL);
81090+#endif
81091+
81092+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
81093+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
81094+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
81095+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
81096+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
81097+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
81098+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
81099+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
81100+}
81101+#else
81102+#define atomic_read_unchecked(v) atomic_read(v)
81103+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
81104+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
81105+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
81106+#define atomic_inc_unchecked(v) atomic_inc(v)
81107+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
81108+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
81109+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
81110+#define atomic_dec_unchecked(v) atomic_dec(v)
81111+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
81112+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
81113+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
81114+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
81115+
81116+#define atomic_long_read_unchecked(v) atomic_long_read(v)
81117+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
81118+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
81119+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
81120+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
81121+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
81122+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
81123+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
81124+#endif
81125+
81126 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
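Everything above follows one pattern: under CONFIG_PAX_REFCOUNT the plain atomic_long_* operations are overflow-checked to stop reference-count overflow exploits, and each gains an *_unchecked twin for counters where wrapping is harmless (statistics and the like); with the option off, the final #define block collapses the two families into one. A rough userspace sketch of what "checked" means here, using __builtin_add_overflow (GCC 5+/Clang) as a stand-in for the plugin-generated trap:

#include <stdio.h>
#include <limits.h>

/* Saturating, overflow-detecting increment: a loose userspace analogue
 * of a PAX_REFCOUNT-instrumented atomic_long_inc(). */
static long checked_inc(long *v)
{
	long next;

	if (__builtin_add_overflow(*v, 1L, &next)) {
		fprintf(stderr, "refcount overflow caught\n");
		return *v;		/* refuse to wrap */
	}
	return *v = next;
}

int main(void)
{
	long counter = LONG_MAX;

	checked_inc(&counter);		/* takes the overflow path */
	return 0;
}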
81127diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
81128index 30ad9c8..c70c170 100644
81129--- a/include/asm-generic/atomic64.h
81130+++ b/include/asm-generic/atomic64.h
81131@@ -16,6 +16,8 @@ typedef struct {
81132 long long counter;
81133 } atomic64_t;
81134
81135+typedef atomic64_t atomic64_unchecked_t;
81136+
81137 #define ATOMIC64_INIT(i) { (i) }
81138
81139 extern long long atomic64_read(const atomic64_t *v);
81140@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
81141 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
81142 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
81143
81144+#define atomic64_read_unchecked(v) atomic64_read(v)
81145+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
81146+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
81147+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
81148+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
81149+#define atomic64_inc_unchecked(v) atomic64_inc(v)
81150+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
81151+#define atomic64_dec_unchecked(v) atomic64_dec(v)
81152+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
81153+
81154 #endif /* _ASM_GENERIC_ATOMIC64_H */
81155diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
81156index f5c40b0..e902f9d 100644
81157--- a/include/asm-generic/barrier.h
81158+++ b/include/asm-generic/barrier.h
81159@@ -82,7 +82,7 @@
81160 do { \
81161 compiletime_assert_atomic_type(*p); \
81162 smp_mb(); \
81163- ACCESS_ONCE(*p) = (v); \
81164+ ACCESS_ONCE_RW(*p) = (v); \
81165 } while (0)
81166
81167 #define smp_load_acquire(p) \
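This pairs with the ACCESS_ONCE() change in include/linux/compiler.h later in this patch: once ACCESS_ONCE() reads through a const volatile pointer, stores through it no longer compile, so writers such as smp_store_release() must use the explicit ACCESS_ONCE_RW() form. A compilable userspace sketch of the split:

#include <stdio.h>

#define ACCESS_ONCE(x)		(*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int flag = 0;

	ACCESS_ONCE_RW(flag) = 1;		/* store: needs the RW variant */
	printf("%d\n", ACCESS_ONCE(flag));	/* load through the const form */
	/* ACCESS_ONCE(flag) = 2;  would not compile: assignment to const */
	return 0;
}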
81168diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
81169index a60a7cc..0fe12f2 100644
81170--- a/include/asm-generic/bitops/__fls.h
81171+++ b/include/asm-generic/bitops/__fls.h
81172@@ -9,7 +9,7 @@
81173 *
81174 * Undefined if no set bit exists, so code should check against 0 first.
81175 */
81176-static __always_inline unsigned long __fls(unsigned long word)
81177+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
81178 {
81179 int num = BITS_PER_LONG - 1;
81180
81181diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
81182index 0576d1f..dad6c71 100644
81183--- a/include/asm-generic/bitops/fls.h
81184+++ b/include/asm-generic/bitops/fls.h
81185@@ -9,7 +9,7 @@
81186 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
81187 */
81188
81189-static __always_inline int fls(int x)
81190+static __always_inline int __intentional_overflow(-1) fls(int x)
81191 {
81192 int r = 32;
81193
81194diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
81195index b097cf8..3d40e14 100644
81196--- a/include/asm-generic/bitops/fls64.h
81197+++ b/include/asm-generic/bitops/fls64.h
81198@@ -15,7 +15,7 @@
81199 * at position 64.
81200 */
81201 #if BITS_PER_LONG == 32
81202-static __always_inline int fls64(__u64 x)
81203+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
81204 {
81205 __u32 h = x >> 32;
81206 if (h)
81207@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
81208 return fls(x);
81209 }
81210 #elif BITS_PER_LONG == 64
81211-static __always_inline int fls64(__u64 x)
81212+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
81213 {
81214 if (x == 0)
81215 return 0;
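In these three bitops headers, __intentional_overflow(-1) exempts the whole function from the size_overflow plugin: bit scans shift and wrap by design, so instrumenting them would only yield false positives. For reference, a standalone copy of the generic fls() contract being annotated (renamed my_fls to avoid clashing with the kernel symbol):

#include <stdio.h>

/* fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32 */
static int my_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	while (!(x & 0x80000000u)) {	/* walk the top bit into place */
		x <<= 1;
		r--;
	}
	return r;
}

int main(void)
{
	printf("%d %d %d\n", my_fls(0), my_fls(1), my_fls(0x80000000u));
	return 0;
}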
81216diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
81217index 1bfcfe5..e04c5c9 100644
81218--- a/include/asm-generic/cache.h
81219+++ b/include/asm-generic/cache.h
81220@@ -6,7 +6,7 @@
81221 * cache lines need to provide their own cache.h.
81222 */
81223
81224-#define L1_CACHE_SHIFT 5
81225-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
81226+#define L1_CACHE_SHIFT 5UL
81227+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
81228
81229 #endif /* __ASM_GENERIC_CACHE_H */
81230diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
81231index 0d68a1e..b74a761 100644
81232--- a/include/asm-generic/emergency-restart.h
81233+++ b/include/asm-generic/emergency-restart.h
81234@@ -1,7 +1,7 @@
81235 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
81236 #define _ASM_GENERIC_EMERGENCY_RESTART_H
81237
81238-static inline void machine_emergency_restart(void)
81239+static inline __noreturn void machine_emergency_restart(void)
81240 {
81241 machine_restart(NULL);
81242 }
81243diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
81244index 90f99c7..00ce236 100644
81245--- a/include/asm-generic/kmap_types.h
81246+++ b/include/asm-generic/kmap_types.h
81247@@ -2,9 +2,9 @@
81248 #define _ASM_GENERIC_KMAP_TYPES_H
81249
81250 #ifdef __WITH_KM_FENCE
81251-# define KM_TYPE_NR 41
81252+# define KM_TYPE_NR 42
81253 #else
81254-# define KM_TYPE_NR 20
81255+# define KM_TYPE_NR 21
81256 #endif
81257
81258 #endif
81259diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
81260index 9ceb03b..62b0b8f 100644
81261--- a/include/asm-generic/local.h
81262+++ b/include/asm-generic/local.h
81263@@ -23,24 +23,37 @@ typedef struct
81264 atomic_long_t a;
81265 } local_t;
81266
81267+typedef struct {
81268+ atomic_long_unchecked_t a;
81269+} local_unchecked_t;
81270+
81271 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
81272
81273 #define local_read(l) atomic_long_read(&(l)->a)
81274+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
81275 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
81276+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
81277 #define local_inc(l) atomic_long_inc(&(l)->a)
81278+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
81279 #define local_dec(l) atomic_long_dec(&(l)->a)
81280+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
81281 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
81282+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
81283 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
81284+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
81285
81286 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
81287 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
81288 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
81289 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
81290 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
81291+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
81292 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
81293 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
81294+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
81295
81296 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
81297+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
81298 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
81299 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
81300 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
81301diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
81302index 725612b..9cc513a 100644
81303--- a/include/asm-generic/pgtable-nopmd.h
81304+++ b/include/asm-generic/pgtable-nopmd.h
81305@@ -1,14 +1,19 @@
81306 #ifndef _PGTABLE_NOPMD_H
81307 #define _PGTABLE_NOPMD_H
81308
81309-#ifndef __ASSEMBLY__
81310-
81311 #include <asm-generic/pgtable-nopud.h>
81312
81313-struct mm_struct;
81314-
81315 #define __PAGETABLE_PMD_FOLDED
81316
81317+#define PMD_SHIFT PUD_SHIFT
81318+#define PTRS_PER_PMD 1
81319+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
81320+#define PMD_MASK (~(PMD_SIZE-1))
81321+
81322+#ifndef __ASSEMBLY__
81323+
81324+struct mm_struct;
81325+
81326 /*
81327 * Having the pmd type consist of a pud gets the size right, and allows
81328 * us to conceptually access the pud entry that this pmd is folded into
81329@@ -16,11 +21,6 @@ struct mm_struct;
81330 */
81331 typedef struct { pud_t pud; } pmd_t;
81332
81333-#define PMD_SHIFT PUD_SHIFT
81334-#define PTRS_PER_PMD 1
81335-#define PMD_SIZE (1UL << PMD_SHIFT)
81336-#define PMD_MASK (~(PMD_SIZE-1))
81337-
81338 /*
81339 * The "pud_xxx()" functions here are trivial for a folded two-level
81340 * setup: the pmd is never bad, and a pmd always exists (as it's folded
81341diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
81342index 810431d..0ec4804f 100644
81343--- a/include/asm-generic/pgtable-nopud.h
81344+++ b/include/asm-generic/pgtable-nopud.h
81345@@ -1,10 +1,15 @@
81346 #ifndef _PGTABLE_NOPUD_H
81347 #define _PGTABLE_NOPUD_H
81348
81349-#ifndef __ASSEMBLY__
81350-
81351 #define __PAGETABLE_PUD_FOLDED
81352
81353+#define PUD_SHIFT PGDIR_SHIFT
81354+#define PTRS_PER_PUD 1
81355+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
81356+#define PUD_MASK (~(PUD_SIZE-1))
81357+
81358+#ifndef __ASSEMBLY__
81359+
81360 /*
81361 * Having the pud type consist of a pgd gets the size right, and allows
81362 * us to conceptually access the pgd entry that this pud is folded into
81363@@ -12,11 +17,6 @@
81364 */
81365 typedef struct { pgd_t pgd; } pud_t;
81366
81367-#define PUD_SHIFT PGDIR_SHIFT
81368-#define PTRS_PER_PUD 1
81369-#define PUD_SIZE (1UL << PUD_SHIFT)
81370-#define PUD_MASK (~(PUD_SIZE-1))
81371-
81372 /*
81373 * The "pgd_xxx()" functions here are trivial for a folded two-level
81374 * setup: the pud is never bad, and a pud always exists (as it's folded
81375@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
81376 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
81377
81378 #define pgd_populate(mm, pgd, pud) do { } while (0)
81379+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
81380 /*
81381 * (puds are folded into pgds so this doesn't get actually called,
81382 * but the define is needed for a generic inline function.)
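pgtable-nopmd.h and pgtable-nopud.h get the same two-part treatment: the PMD_*/PUD_* constants move outside the #ifndef __ASSEMBLY__ guard so assembly code can consume them, and 1UL becomes _AC(1,UL) because the UL suffix is C-only syntax. A short sketch of how _AC() lets one definition serve both languages (simplified from include/uapi/linux/const.h):

/* In assembly the suffix is dropped; in C it is token-pasted on. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define PMD_SHIFT	21
#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)  /* 1UL << 21 in C, 1 << 21 in asm */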
81383diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
81384index 4d46085..f4e92ef 100644
81385--- a/include/asm-generic/pgtable.h
81386+++ b/include/asm-generic/pgtable.h
81387@@ -689,6 +689,22 @@ static inline int pmd_protnone(pmd_t pmd)
81388 }
81389 #endif /* CONFIG_NUMA_BALANCING */
81390
81391+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
81392+#ifdef CONFIG_PAX_KERNEXEC
81393+#error KERNEXEC requires pax_open_kernel
81394+#else
81395+static inline unsigned long pax_open_kernel(void) { return 0; }
81396+#endif
81397+#endif
81398+
81399+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
81400+#ifdef CONFIG_PAX_KERNEXEC
81401+#error KERNEXEC requires pax_close_kernel
81402+#else
81403+static inline unsigned long pax_close_kernel(void) { return 0; }
81404+#endif
81405+#endif
81406+
81407 #endif /* CONFIG_MMU */
81408
81409 #endif /* !__ASSEMBLY__ */
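The pattern is compile-time enforcement with a free fallback: an architecture enabling CONFIG_PAX_KERNEXEC must provide pax_open_kernel()/pax_close_kernel() (and set __HAVE_ARCH_PAX_OPEN_KERNEL), or the build breaks; without KERNEXEC the helpers become no-op stubs so callers need no #ifdefs. The uaccess.h hunk below applies the identical scheme to UDEREF's pax_open_userland()/pax_close_userland(). A hypothetical caller, purely to show the intended bracketing (function and variable names invented):

/* Invented example: briefly lift kernel write protection around an
 * update to data that is normally read-only after boot. */
static void patch_readonly_word(unsigned long *target, unsigned long val)
{
	pax_open_kernel();	/* arch hook; no-op stub without KERNEXEC */
	*target = val;
	pax_close_kernel();	/* restore write protection */
}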
81410diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
81411index 72d8803..cb9749c 100644
81412--- a/include/asm-generic/uaccess.h
81413+++ b/include/asm-generic/uaccess.h
81414@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
81415 return __clear_user(to, n);
81416 }
81417
81418+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
81419+#ifdef CONFIG_PAX_MEMORY_UDEREF
81420+#error UDEREF requires pax_open_userland
81421+#else
81422+static inline unsigned long pax_open_userland(void) { return 0; }
81423+#endif
81424+#endif
81425+
81426+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
81427+#ifdef CONFIG_PAX_MEMORY_UDEREF
81428+#error UDEREF requires pax_close_userland
81429+#else
81430+static inline unsigned long pax_close_userland(void) { return 0; }
81431+#endif
81432+#endif
81433+
81434 #endif /* __ASM_GENERIC_UACCESS_H */
81435diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
81436index ac78910..775a306 100644
81437--- a/include/asm-generic/vmlinux.lds.h
81438+++ b/include/asm-generic/vmlinux.lds.h
81439@@ -234,6 +234,7 @@
81440 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
81441 VMLINUX_SYMBOL(__start_rodata) = .; \
81442 *(.rodata) *(.rodata.*) \
81443+ *(.data..read_only) \
81444 *(__vermagic) /* Kernel version magic */ \
81445 . = ALIGN(8); \
81446 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
81447@@ -727,17 +728,18 @@
81448 * section in the linker script will go there too. @phdr should have
81449 * a leading colon.
81450 *
81451- * Note that this macros defines __per_cpu_load as an absolute symbol.
81452+ * Note that this macro defines per_cpu_load as an absolute symbol.
81453 * If there is no need to put the percpu section at a predetermined
81454 * address, use PERCPU_SECTION.
81455 */
81456 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
81457- VMLINUX_SYMBOL(__per_cpu_load) = .; \
81458- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
81459+ per_cpu_load = .; \
81460+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
81461 - LOAD_OFFSET) { \
81462+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
81463 PERCPU_INPUT(cacheline) \
81464 } phdr \
81465- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
81466+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
81467
81468 /**
81469 * PERCPU_SECTION - define output section for percpu area, simple version
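Merging .data..read_only into the .rodata output section is what backs grsecurity's __read_only annotation: such variables are initialized normally but land in the region that gets write-protected. On KERNEXEC-capable architectures the macro expands to a section attribute along these lines (a sketch; the variable name is invented, and the include/linux/cache.h hunk later in this patch supplies the __read_mostly fallback for everyone else):

#define __read_only __attribute__((__section__(".data..read_only")))

/* Written once during init, then sealed along with .rodata. */
static int example_tunable __read_only = 1;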
81470diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
81471index 623a59c..1e79ab9 100644
81472--- a/include/crypto/algapi.h
81473+++ b/include/crypto/algapi.h
81474@@ -34,7 +34,7 @@ struct crypto_type {
81475 unsigned int maskclear;
81476 unsigned int maskset;
81477 unsigned int tfmsize;
81478-};
81479+} __do_const;
81480
81481 struct crypto_instance {
81482 struct crypto_alg alg;
81483diff --git a/include/drm/drmP.h b/include/drm/drmP.h
81484index e928625..ff97886 100644
81485--- a/include/drm/drmP.h
81486+++ b/include/drm/drmP.h
81487@@ -59,6 +59,7 @@
81488
81489 #include <asm/mman.h>
81490 #include <asm/pgalloc.h>
81491+#include <asm/local.h>
81492 #include <asm/uaccess.h>
81493
81494 #include <uapi/drm/drm.h>
81495@@ -133,17 +134,18 @@ void drm_err(const char *format, ...);
81496 /*@{*/
81497
81498 /* driver capabilities and requirements mask */
81499-#define DRIVER_USE_AGP 0x1
81500-#define DRIVER_PCI_DMA 0x8
81501-#define DRIVER_SG 0x10
81502-#define DRIVER_HAVE_DMA 0x20
81503-#define DRIVER_HAVE_IRQ 0x40
81504-#define DRIVER_IRQ_SHARED 0x80
81505-#define DRIVER_GEM 0x1000
81506-#define DRIVER_MODESET 0x2000
81507-#define DRIVER_PRIME 0x4000
81508-#define DRIVER_RENDER 0x8000
81509-#define DRIVER_ATOMIC 0x10000
81510+#define DRIVER_USE_AGP 0x1
81511+#define DRIVER_PCI_DMA 0x8
81512+#define DRIVER_SG 0x10
81513+#define DRIVER_HAVE_DMA 0x20
81514+#define DRIVER_HAVE_IRQ 0x40
81515+#define DRIVER_IRQ_SHARED 0x80
81516+#define DRIVER_GEM 0x1000
81517+#define DRIVER_MODESET 0x2000
81518+#define DRIVER_PRIME 0x4000
81519+#define DRIVER_RENDER 0x8000
81520+#define DRIVER_ATOMIC 0x10000
81521+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
81522
81523 /***********************************************************************/
81524 /** \name Macros to make printk easier */
81525@@ -224,10 +226,12 @@ void drm_err(const char *format, ...);
81526 * \param cmd command.
81527 * \param arg argument.
81528 */
81529-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
81530+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
81531+ struct drm_file *file_priv);
81532+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
81533 struct drm_file *file_priv);
81534
81535-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81536+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
81537 unsigned long arg);
81538
81539 #define DRM_IOCTL_NR(n) _IOC_NR(n)
81540@@ -243,10 +247,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
81541 struct drm_ioctl_desc {
81542 unsigned int cmd;
81543 int flags;
81544- drm_ioctl_t *func;
81545+ drm_ioctl_t func;
81546 unsigned int cmd_drv;
81547 const char *name;
81548-};
81549+} __do_const;
81550
81551 /**
81552 * Creates a driver or general drm_ioctl_desc array entry for the given
81553@@ -632,7 +636,8 @@ struct drm_info_list {
81554 int (*show)(struct seq_file*, void*); /** show callback */
81555 u32 driver_features; /**< Required driver features for this entry */
81556 void *data;
81557-};
81558+} __do_const;
81559+typedef struct drm_info_list __no_const drm_info_list_no_const;
81560
81561 /**
81562 * debugfs node structure. This structure represents a debugfs file.
81563@@ -716,7 +721,7 @@ struct drm_device {
81564
81565 /** \name Usage Counters */
81566 /*@{ */
81567- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81568+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
81569 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
81570 int buf_use; /**< Buffers in use -- cannot alloc */
81571 atomic_t buf_alloc; /**< Buffer allocation in progress */
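Note the open_count change from int to local_t: the open/release paths then update it through atomic local_* helpers instead of racy read-modify-write on a plain int (hence the added #include <asm/local.h>). A userspace analogue, with C11 atomics standing in for the kernel's local_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count;	/* zero-initialized, updated atomically */

static void device_open(void)  { atomic_fetch_add(&open_count, 1); }
static void device_close(void) { atomic_fetch_sub(&open_count, 1); }

int main(void)
{
	device_open();
	device_close();
	printf("%ld\n", atomic_load(&open_count));	/* prints 0 */
	return 0;
}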
81572diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
81573index c250a22..59d2094 100644
81574--- a/include/drm/drm_crtc_helper.h
81575+++ b/include/drm/drm_crtc_helper.h
81576@@ -160,7 +160,7 @@ struct drm_encoder_helper_funcs {
81577 int (*atomic_check)(struct drm_encoder *encoder,
81578 struct drm_crtc_state *crtc_state,
81579 struct drm_connector_state *conn_state);
81580-};
81581+} __no_const;
81582
81583 /**
81584 * struct drm_connector_helper_funcs - helper operations for connectors
81585diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
81586index d016dc5..3951fe0 100644
81587--- a/include/drm/i915_pciids.h
81588+++ b/include/drm/i915_pciids.h
81589@@ -37,7 +37,7 @@
81590 */
81591 #define INTEL_VGA_DEVICE(id, info) { \
81592 0x8086, id, \
81593- ~0, ~0, \
81594+ PCI_ANY_ID, PCI_ANY_ID, \
81595 0x030000, 0xff0000, \
81596 (unsigned long) info }
81597
81598diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
81599index 72dcbe8..8db58d7 100644
81600--- a/include/drm/ttm/ttm_memory.h
81601+++ b/include/drm/ttm/ttm_memory.h
81602@@ -48,7 +48,7 @@
81603
81604 struct ttm_mem_shrink {
81605 int (*do_shrink) (struct ttm_mem_shrink *);
81606-};
81607+} __no_const;
81608
81609 /**
81610 * struct ttm_mem_global - Global memory accounting structure.
81611diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
81612index 49a8284..9643967 100644
81613--- a/include/drm/ttm/ttm_page_alloc.h
81614+++ b/include/drm/ttm/ttm_page_alloc.h
81615@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
81616 */
81617 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
81618
81619+struct device;
81620 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81621 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
81622
81623diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
81624index 4b840e8..155d235 100644
81625--- a/include/keys/asymmetric-subtype.h
81626+++ b/include/keys/asymmetric-subtype.h
81627@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
81628 /* Verify the signature on a key of this subtype (optional) */
81629 int (*verify_signature)(const struct key *key,
81630 const struct public_key_signature *sig);
81631-};
81632+} __do_const;
81633
81634 /**
81635 * asymmetric_key_subtype - Get the subtype from an asymmetric key
81636diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
81637index c1da539..1dcec55 100644
81638--- a/include/linux/atmdev.h
81639+++ b/include/linux/atmdev.h
81640@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
81641 #endif
81642
81643 struct k_atm_aal_stats {
81644-#define __HANDLE_ITEM(i) atomic_t i
81645+#define __HANDLE_ITEM(i) atomic_unchecked_t i
81646 __AAL_STAT_ITEMS
81647 #undef __HANDLE_ITEM
81648 };
81649@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
81650 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
81651 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
81652 struct module *owner;
81653-};
81654+} __do_const;
81655
81656 struct atmphy_ops {
81657 int (*start)(struct atm_dev *dev);
81658diff --git a/include/linux/atomic.h b/include/linux/atomic.h
81659index 5b08a85..60922fb 100644
81660--- a/include/linux/atomic.h
81661+++ b/include/linux/atomic.h
81662@@ -12,7 +12,7 @@
81663 * Atomically adds @a to @v, so long as @v was not already @u.
81664 * Returns non-zero if @v was not @u, and zero otherwise.
81665 */
81666-static inline int atomic_add_unless(atomic_t *v, int a, int u)
81667+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
81668 {
81669 return __atomic_add_unless(v, a, u) != u;
81670 }
81671diff --git a/include/linux/audit.h b/include/linux/audit.h
81672index c2e7e3a..8bfc0e1 100644
81673--- a/include/linux/audit.h
81674+++ b/include/linux/audit.h
81675@@ -223,7 +223,7 @@ static inline void audit_ptrace(struct task_struct *t)
81676 extern unsigned int audit_serial(void);
81677 extern int auditsc_get_stamp(struct audit_context *ctx,
81678 struct timespec *t, unsigned int *serial);
81679-extern int audit_set_loginuid(kuid_t loginuid);
81680+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
81681
81682 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
81683 {
81684diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
81685index 576e463..28fd926 100644
81686--- a/include/linux/binfmts.h
81687+++ b/include/linux/binfmts.h
81688@@ -44,7 +44,7 @@ struct linux_binprm {
81689 unsigned interp_flags;
81690 unsigned interp_data;
81691 unsigned long loader, exec;
81692-};
81693+} __randomize_layout;
81694
81695 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
81696 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
81697@@ -77,8 +77,10 @@ struct linux_binfmt {
81698 int (*load_binary)(struct linux_binprm *);
81699 int (*load_shlib)(struct file *);
81700 int (*core_dump)(struct coredump_params *cprm);
81701+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
81702+ void (*handle_mmap)(struct file *);
81703 unsigned long min_coredump; /* minimal dump size */
81704-};
81705+} __do_const __randomize_layout;
81706
81707 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
81708
81709diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
81710index dbfbf49..10be372 100644
81711--- a/include/linux/bitmap.h
81712+++ b/include/linux/bitmap.h
81713@@ -299,7 +299,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
81714 return __bitmap_full(src, nbits);
81715 }
81716
81717-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
81718+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
81719 {
81720 if (small_const_nbits(nbits))
81721 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
81722diff --git a/include/linux/bitops.h b/include/linux/bitops.h
81723index 5d858e0..336c1d9 100644
81724--- a/include/linux/bitops.h
81725+++ b/include/linux/bitops.h
81726@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
81727 * @word: value to rotate
81728 * @shift: bits to roll
81729 */
81730-static inline __u32 rol32(__u32 word, unsigned int shift)
81731+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
81732 {
81733 return (word << shift) | (word >> (32 - shift));
81734 }
81735@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
81736 * @word: value to rotate
81737 * @shift: bits to roll
81738 */
81739-static inline __u32 ror32(__u32 word, unsigned int shift)
81740+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
81741 {
81742 return (word >> shift) | (word << (32 - shift));
81743 }
81744@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
81745 return (__s32)(value << shift) >> shift;
81746 }
81747
81748-static inline unsigned fls_long(unsigned long l)
81749+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
81750 {
81751 if (sizeof(l) == 4)
81752 return fls(l);
81753diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
81754index 7f9a516..8889453 100644
81755--- a/include/linux/blkdev.h
81756+++ b/include/linux/blkdev.h
81757@@ -1616,7 +1616,7 @@ struct block_device_operations {
81758 /* this callback is with swap_lock and sometimes page table lock held */
81759 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
81760 struct module *owner;
81761-};
81762+} __do_const;
81763
81764 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
81765 unsigned long);
81766diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
81767index afc1343..9735539 100644
81768--- a/include/linux/blktrace_api.h
81769+++ b/include/linux/blktrace_api.h
81770@@ -25,7 +25,7 @@ struct blk_trace {
81771 struct dentry *dropped_file;
81772 struct dentry *msg_file;
81773 struct list_head running_list;
81774- atomic_t dropped;
81775+ atomic_unchecked_t dropped;
81776 };
81777
81778 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
81779diff --git a/include/linux/cache.h b/include/linux/cache.h
81780index 17e7e82..1d7da26 100644
81781--- a/include/linux/cache.h
81782+++ b/include/linux/cache.h
81783@@ -16,6 +16,14 @@
81784 #define __read_mostly
81785 #endif
81786
81787+#ifndef __read_only
81788+#ifdef CONFIG_PAX_KERNEXEC
81789+#error KERNEXEC requires __read_only
81790+#else
81791+#define __read_only __read_mostly
81792+#endif
81793+#endif
81794+
81795 #ifndef ____cacheline_aligned
81796 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
81797 #endif
81798diff --git a/include/linux/capability.h b/include/linux/capability.h
81799index aa93e5e..985a1b0 100644
81800--- a/include/linux/capability.h
81801+++ b/include/linux/capability.h
81802@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
81803 extern bool capable(int cap);
81804 extern bool ns_capable(struct user_namespace *ns, int cap);
81805 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
81806+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
81807 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
81808+extern bool capable_nolog(int cap);
81809+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
81810
81811 /* audit system wants to get cap info from files as well */
81812 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
81813
81814+extern int is_privileged_binary(const struct dentry *dentry);
81815+
81816 #endif /* !_LINUX_CAPABILITY_H */
81817diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
81818index 8609d57..86e4d79 100644
81819--- a/include/linux/cdrom.h
81820+++ b/include/linux/cdrom.h
81821@@ -87,7 +87,6 @@ struct cdrom_device_ops {
81822
81823 /* driver specifications */
81824 const int capability; /* capability flags */
81825- int n_minors; /* number of active minor devices */
81826 /* handle uniform packets for scsi type devices (scsi,atapi) */
81827 int (*generic_packet) (struct cdrom_device_info *,
81828 struct packet_command *);
81829diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
81830index 4ce9056..86caac6 100644
81831--- a/include/linux/cleancache.h
81832+++ b/include/linux/cleancache.h
81833@@ -31,7 +31,7 @@ struct cleancache_ops {
81834 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
81835 void (*invalidate_inode)(int, struct cleancache_filekey);
81836 void (*invalidate_fs)(int);
81837-};
81838+} __no_const;
81839
81840 extern struct cleancache_ops *
81841 cleancache_register_ops(struct cleancache_ops *ops);
81842diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
81843index 5591ea7..61b77ce 100644
81844--- a/include/linux/clk-provider.h
81845+++ b/include/linux/clk-provider.h
81846@@ -195,6 +195,7 @@ struct clk_ops {
81847 void (*init)(struct clk_hw *hw);
81848 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
81849 };
81850+typedef struct clk_ops __no_const clk_ops_no_const;
81851
81852 /**
81853 * struct clk_init_data - holds init data that's common to all clocks and is
81854diff --git a/include/linux/compat.h b/include/linux/compat.h
81855index ab25814..d1540d1 100644
81856--- a/include/linux/compat.h
81857+++ b/include/linux/compat.h
81858@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
81859 compat_size_t __user *len_ptr);
81860
81861 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
81862-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
81863+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
81864 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
81865 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
81866 compat_ssize_t msgsz, int msgflg);
81867@@ -325,7 +325,7 @@ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
81868 long compat_sys_msgctl(int first, int second, void __user *uptr);
81869 long compat_sys_shmctl(int first, int second, void __user *uptr);
81870 long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
81871- unsigned nsems, const struct compat_timespec __user *timeout);
81872+ compat_long_t nsems, const struct compat_timespec __user *timeout);
81873 asmlinkage long compat_sys_keyctl(u32 option,
81874 u32 arg2, u32 arg3, u32 arg4, u32 arg5);
81875 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
81876@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
81877 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
81878 compat_ulong_t addr, compat_ulong_t data);
81879 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81880- compat_long_t addr, compat_long_t data);
81881+ compat_ulong_t addr, compat_ulong_t data);
81882
81883 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
81884 /*
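Two of these prototype changes are signedness corrections: semtimedop()'s nsems widens to compat_long_t, and the ptrace addr/data arguments flip from compat_long_t to compat_ulong_t so 32-bit values zero-extend rather than sign-extend on their way to the native 64-bit handlers. The hazard in a self-contained form (assumes a 64-bit build):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t  s = (int32_t)0x80000000;	/* like compat_long_t  */
	uint32_t u = UINT32_C(0x80000000);	/* like compat_ulong_t */

	/* Widening the signed value smears the sign bit across the
	 * upper 32 bits; the unsigned value zero-extends. */
	printf("signed:   %#llx\n", (unsigned long long)(int64_t)s);
	printf("unsigned: %#llx\n", (unsigned long long)(uint64_t)u);
	return 0;
}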
81885diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
81886index 769e198..f670585 100644
81887--- a/include/linux/compiler-gcc4.h
81888+++ b/include/linux/compiler-gcc4.h
81889@@ -39,9 +39,34 @@
81890 # define __compiletime_warning(message) __attribute__((warning(message)))
81891 # define __compiletime_error(message) __attribute__((error(message)))
81892 #endif /* __CHECKER__ */
81893+
81894+#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
81895+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81896+#define __bos0(ptr) __bos((ptr), 0)
81897+#define __bos1(ptr) __bos((ptr), 1)
81898 #endif /* GCC_VERSION >= 40300 */
81899
81900 #if GCC_VERSION >= 40500
81901+
81902+#ifdef RANDSTRUCT_PLUGIN
81903+#define __randomize_layout __attribute__((randomize_layout))
81904+#define __no_randomize_layout __attribute__((no_randomize_layout))
81905+#endif
81906+
81907+#ifdef CONSTIFY_PLUGIN
81908+#define __no_const __attribute__((no_const))
81909+#define __do_const __attribute__((do_const))
81910+#endif
81911+
81912+#ifdef SIZE_OVERFLOW_PLUGIN
81913+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81914+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81915+#endif
81916+
81917+#ifdef LATENT_ENTROPY_PLUGIN
81918+#define __latent_entropy __attribute__((latent_entropy))
81919+#endif
81920+
81921 /*
81922 * Mark a position in code as unreachable. This can be used to
81923 * suppress control flow warnings after asm blocks that transfer
81924diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
81925index efee493..06f9f63 100644
81926--- a/include/linux/compiler-gcc5.h
81927+++ b/include/linux/compiler-gcc5.h
81928@@ -28,6 +28,30 @@
81929 # define __compiletime_error(message) __attribute__((error(message)))
81930 #endif /* __CHECKER__ */
81931
81932+#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
81933+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81934+#define __bos0(ptr) __bos((ptr), 0)
81935+#define __bos1(ptr) __bos((ptr), 1)
81936+
81937+#ifdef RANDSTRUCT_PLUGIN
81938+#define __randomize_layout __attribute__((randomize_layout))
81939+#define __no_randomize_layout __attribute__((no_randomize_layout))
81940+#endif
81941+
81942+#ifdef CONSTIFY_PLUGIN
81943+#define __no_const __attribute__((no_const))
81944+#define __do_const __attribute__((do_const))
81945+#endif
81946+
81947+#ifdef SIZE_OVERFLOW_PLUGIN
81948+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81949+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81950+#endif
81951+
81952+#ifdef LATENT_ENTROPY_PLUGIN
81953+#define __latent_entropy __attribute__((latent_entropy))
81954+#endif
81955+
81956 /*
81957 * Mark a position in code as unreachable. This can be used to
81958 * suppress control flow warnings after asm blocks that transfer
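compiler-gcc4.h and compiler-gcc5.h gain identical blocks: each PaX/grsecurity GCC plugin exports its marker attribute only when that plugin is loaded (RANDSTRUCT_PLUGIN, CONSTIFY_PLUGIN, SIZE_OVERFLOW_PLUGIN, LATENT_ENTROPY_PLUGIN), and the compiler.h hunk below adds empty fallbacks so builds without the plugins still compile. A sketch of the consumer side (example_alloc is an invented name):

#ifndef __size_overflow
#define __size_overflow(...)	/* plugin absent: expands to nothing */
#endif

/* Argument 1 (len) participates in size-overflow tracking when the
 * plugin is loaded; otherwise this is an ordinary prototype. */
void *example_alloc(unsigned long len) __size_overflow(1);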
81959diff --git a/include/linux/compiler.h b/include/linux/compiler.h
81960index 1b45e4a..33028cd 100644
81961--- a/include/linux/compiler.h
81962+++ b/include/linux/compiler.h
81963@@ -5,11 +5,14 @@
81964
81965 #ifdef __CHECKER__
81966 # define __user __attribute__((noderef, address_space(1)))
81967+# define __force_user __force __user
81968 # define __kernel __attribute__((address_space(0)))
81969+# define __force_kernel __force __kernel
81970 # define __safe __attribute__((safe))
81971 # define __force __attribute__((force))
81972 # define __nocast __attribute__((nocast))
81973 # define __iomem __attribute__((noderef, address_space(2)))
81974+# define __force_iomem __force __iomem
81975 # define __must_hold(x) __attribute__((context(x,1,1)))
81976 # define __acquires(x) __attribute__((context(x,0,1)))
81977 # define __releases(x) __attribute__((context(x,1,0)))
81978@@ -17,20 +20,37 @@
81979 # define __release(x) __context__(x,-1)
81980 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
81981 # define __percpu __attribute__((noderef, address_space(3)))
81982+# define __force_percpu __force __percpu
81983 #ifdef CONFIG_SPARSE_RCU_POINTER
81984 # define __rcu __attribute__((noderef, address_space(4)))
81985+# define __force_rcu __force __rcu
81986 #else
81987 # define __rcu
81988+# define __force_rcu
81989 #endif
81990 extern void __chk_user_ptr(const volatile void __user *);
81991 extern void __chk_io_ptr(const volatile void __iomem *);
81992 #else
81993-# define __user
81994-# define __kernel
81995+# ifdef CHECKER_PLUGIN
81996+//# define __user
81997+//# define __force_user
81998+//# define __kernel
81999+//# define __force_kernel
82000+# else
82001+# ifdef STRUCTLEAK_PLUGIN
82002+# define __user __attribute__((user))
82003+# else
82004+# define __user
82005+# endif
82006+# define __force_user
82007+# define __kernel
82008+# define __force_kernel
82009+# endif
82010 # define __safe
82011 # define __force
82012 # define __nocast
82013 # define __iomem
82014+# define __force_iomem
82015 # define __chk_user_ptr(x) (void)0
82016 # define __chk_io_ptr(x) (void)0
82017 # define __builtin_warning(x, y...) (1)
82018@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
82019 # define __release(x) (void)0
82020 # define __cond_lock(x,c) (c)
82021 # define __percpu
82022+# define __force_percpu
82023 # define __rcu
82024+# define __force_rcu
82025 #endif
82026
82027 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
82028@@ -205,32 +227,32 @@ static __always_inline void data_access_exceeds_word_size(void)
82029 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
82030 {
82031 switch (size) {
82032- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
82033- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
82034- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
82035+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
82036+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
82037+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
82038 #ifdef CONFIG_64BIT
82039- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
82040+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
82041 #endif
82042 default:
82043 barrier();
82044- __builtin_memcpy((void *)res, (const void *)p, size);
82045+ __builtin_memcpy(res, (const void *)p, size);
82046 data_access_exceeds_word_size();
82047 barrier();
82048 }
82049 }
82050
82051-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
82052+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
82053 {
82054 switch (size) {
82055- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
82056- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
82057- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
82058+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
82059+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
82060+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
82061 #ifdef CONFIG_64BIT
82062- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
82063+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
82064 #endif
82065 default:
82066 barrier();
82067- __builtin_memcpy((void *)p, (const void *)res, size);
82068+ __builtin_memcpy((void *)p, res, size);
82069 data_access_exceeds_word_size();
82070 barrier();
82071 }
82072@@ -364,6 +386,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
82073 # define __attribute_const__ /* unimplemented */
82074 #endif
82075
82076+#ifndef __randomize_layout
82077+# define __randomize_layout
82078+#endif
82079+
82080+#ifndef __no_randomize_layout
82081+# define __no_randomize_layout
82082+#endif
82083+
82084+#ifndef __no_const
82085+# define __no_const
82086+#endif
82087+
82088+#ifndef __do_const
82089+# define __do_const
82090+#endif
82091+
82092+#ifndef __size_overflow
82093+# define __size_overflow(...)
82094+#endif
82095+
82096+#ifndef __intentional_overflow
82097+# define __intentional_overflow(...)
82098+#endif
82099+
82100+#ifndef __latent_entropy
82101+# define __latent_entropy
82102+#endif
82103+
82104 /*
82105 * Tell gcc if a function is cold. The compiler will assume any path
82106 * directly leading to the call is unlikely.
82107@@ -373,6 +423,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
82108 #define __cold
82109 #endif
82110
82111+#ifndef __alloc_size
82112+#define __alloc_size(...)
82113+#endif
82114+
82115+#ifndef __bos
82116+#define __bos(ptr, arg)
82117+#endif
82118+
82119+#ifndef __bos0
82120+#define __bos0(ptr)
82121+#endif
82122+
82123+#ifndef __bos1
82124+#define __bos1(ptr)
82125+#endif
82126+
82127 /* Simple shorthand for a section definition */
82128 #ifndef __section
82129 # define __section(S) __attribute__ ((__section__(#S)))
82130@@ -387,6 +453,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
82131 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
82132 #endif
82133
82134+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
82135+
82136 /* Is this type a native word size -- useful for atomic operations */
82137 #ifndef __native_word
82138 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
82139@@ -466,8 +534,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
82140 */
82141 #define __ACCESS_ONCE(x) ({ \
82142 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
82143- (volatile typeof(x) *)&(x); })
82144+ (volatile const typeof(x) *)&(x); })
82145 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
82146+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
82147
82148 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
82149 #ifdef CONFIG_KPROBES
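Besides supplying those empty attribute fallbacks, this hunk const-qualifies the __read_once_size()/__write_once_size() casts and adds __type_is_unsigned(), which classifies a type at compile time via __same_type(), i.e. __builtin_types_compatible_p. A compilable demonstration:

#include <stdio.h>

#define __same_type(a, b) \
	__builtin_types_compatible_p(__typeof__(a), __typeof__(b))
#define __type_is_unsigned(t) \
	(__same_type((t)0, 0UL) || __same_type((t)0, 0U) || \
	 __same_type((t)0, (unsigned short)0) || \
	 __same_type((t)0, (unsigned char)0))

int main(void)
{
	/* prints "1 0": unsigned int matches a candidate, plain int does not */
	printf("%d %d\n", __type_is_unsigned(unsigned int), __type_is_unsigned(int));
	return 0;
}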
82150diff --git a/include/linux/completion.h b/include/linux/completion.h
82151index 5d5aaae..0ea9b84 100644
82152--- a/include/linux/completion.h
82153+++ b/include/linux/completion.h
82154@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
82155
82156 extern void wait_for_completion(struct completion *);
82157 extern void wait_for_completion_io(struct completion *);
82158-extern int wait_for_completion_interruptible(struct completion *x);
82159-extern int wait_for_completion_killable(struct completion *x);
82160+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
82161+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
82162 extern unsigned long wait_for_completion_timeout(struct completion *x,
82163- unsigned long timeout);
82164+ unsigned long timeout) __intentional_overflow(-1);
82165 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
82166- unsigned long timeout);
82167+ unsigned long timeout) __intentional_overflow(-1);
82168 extern long wait_for_completion_interruptible_timeout(
82169- struct completion *x, unsigned long timeout);
82170+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
82171 extern long wait_for_completion_killable_timeout(
82172- struct completion *x, unsigned long timeout);
82173+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
82174 extern bool try_wait_for_completion(struct completion *x);
82175 extern bool completion_done(struct completion *x);
82176
82177diff --git a/include/linux/configfs.h b/include/linux/configfs.h
82178index 34025df..d94bbbc 100644
82179--- a/include/linux/configfs.h
82180+++ b/include/linux/configfs.h
82181@@ -125,7 +125,7 @@ struct configfs_attribute {
82182 const char *ca_name;
82183 struct module *ca_owner;
82184 umode_t ca_mode;
82185-};
82186+} __do_const;
82187
82188 /*
82189 * Users often need to create attribute structures for their configurable
82190diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
82191index 2ee4888..0451f5e 100644
82192--- a/include/linux/cpufreq.h
82193+++ b/include/linux/cpufreq.h
82194@@ -207,6 +207,7 @@ struct global_attr {
82195 ssize_t (*store)(struct kobject *a, struct attribute *b,
82196 const char *c, size_t count);
82197 };
82198+typedef struct global_attr __no_const global_attr_no_const;
82199
82200 #define define_one_global_ro(_name) \
82201 static struct global_attr _name = \
82202@@ -278,7 +279,7 @@ struct cpufreq_driver {
82203 bool boost_supported;
82204 bool boost_enabled;
82205 int (*set_boost)(int state);
82206-};
82207+} __do_const;
82208
82209 /* flags */
82210 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
82211diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
82212index 9c5e892..feb34e0 100644
82213--- a/include/linux/cpuidle.h
82214+++ b/include/linux/cpuidle.h
82215@@ -59,7 +59,8 @@ struct cpuidle_state {
82216 void (*enter_freeze) (struct cpuidle_device *dev,
82217 struct cpuidle_driver *drv,
82218 int index);
82219-};
82220+} __do_const;
82221+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
82222
82223 /* Idle State Flags */
82224 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
82225@@ -227,7 +228,7 @@ struct cpuidle_governor {
82226 void (*reflect) (struct cpuidle_device *dev, int index);
82227
82228 struct module *owner;
82229-};
82230+} __do_const;
82231
82232 #ifdef CONFIG_CPU_IDLE
82233 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
82234diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
82235index 086549a..a572d94 100644
82236--- a/include/linux/cpumask.h
82237+++ b/include/linux/cpumask.h
82238@@ -126,17 +126,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
82239 }
82240
82241 /* Valid inputs for n are -1 and 0. */
82242-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82243+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
82244 {
82245 return n+1;
82246 }
82247
82248-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82249+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
82250 {
82251 return n+1;
82252 }
82253
82254-static inline unsigned int cpumask_next_and(int n,
82255+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
82256 const struct cpumask *srcp,
82257 const struct cpumask *andp)
82258 {
82259@@ -182,7 +182,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
82260 *
82261 * Returns >= nr_cpu_ids if no further cpus set.
82262 */
82263-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82264+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
82265 {
82266 /* -1 is a legal arg here. */
82267 if (n != -1)
82268@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
82269 *
82270 * Returns >= nr_cpu_ids if no further cpus unset.
82271 */
82272-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82273+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
82274 {
82275 /* -1 is a legal arg here. */
82276 if (n != -1)
82277@@ -205,7 +205,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
82278 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
82279 }
82280
82281-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
82282+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
82283 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
82284 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
82285
82286@@ -472,7 +472,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
82287 * cpumask_weight - Count of bits in *srcp
82288 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
82289 */
82290-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
82291+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
82292 {
82293 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
82294 }
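The cpumask iterators take n == -1 to mean "before the first bit" and report >= nr_cpu_ids when exhausted, so both the -1 entry point and the n+1 step are deliberate wraparound territory, which is what the __intentional_overflow(-1) annotations above acknowledge. The idiom they support:

	int cpu;

	for (cpu = cpumask_next(-1, mask);	/* -1: start before bit 0 */
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, mask))
		use_cpu(cpu);			/* use_cpu() is illustrative */

This is essentially what the upstream for_each_cpu() macro expands to.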
82295diff --git a/include/linux/cred.h b/include/linux/cred.h
82296index 2fb2ca2..d6a3340 100644
82297--- a/include/linux/cred.h
82298+++ b/include/linux/cred.h
82299@@ -35,7 +35,7 @@ struct group_info {
82300 int nblocks;
82301 kgid_t small_block[NGROUPS_SMALL];
82302 kgid_t *blocks[0];
82303-};
82304+} __randomize_layout;
82305
82306 /**
82307 * get_group_info - Get a reference to a group info structure
82308@@ -137,7 +137,7 @@ struct cred {
82309 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
82310 struct group_info *group_info; /* supplementary groups for euid/fsgid */
82311 struct rcu_head rcu; /* RCU deletion hook */
82312-};
82313+} __randomize_layout;
82314
82315 extern void __put_cred(struct cred *);
82316 extern void exit_creds(struct task_struct *);
82317@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
82318 static inline void validate_process_creds(void)
82319 {
82320 }
82321+static inline void validate_task_creds(struct task_struct *task)
82322+{
82323+}
82324 #endif
82325
82326 /**
82327@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
82328
82329 #define task_uid(task) (task_cred_xxx((task), uid))
82330 #define task_euid(task) (task_cred_xxx((task), euid))
82331+#define task_securebits(task) (task_cred_xxx((task), securebits))
82332
82333 #define current_cred_xxx(xxx) \
82334 ({ \
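The new task_securebits() accessor simply instantiates the existing task_cred_xxx() template for the securebits field, giving callers an RCU-safe snapshot without open-coded locking. For reference, the upstream helper it builds on looks roughly like this (simplified):

	#define task_cred_xxx(task, xxx)			\
	({							\
		__typeof__(((struct cred *)NULL)->xxx) ___val;	\
		rcu_read_lock();				\
		___val = __task_cred(task)->xxx;		\
		rcu_read_unlock();				\
		___val;						\
	})

so task_securebits(task) evaluates to the task's cred->securebits, read under rcu_read_lock().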
82335diff --git a/include/linux/crypto.h b/include/linux/crypto.h
82336index fb5ef16..05d1e59 100644
82337--- a/include/linux/crypto.h
82338+++ b/include/linux/crypto.h
82339@@ -626,7 +626,7 @@ struct cipher_tfm {
82340 const u8 *key, unsigned int keylen);
82341 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
82342 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
82343-};
82344+} __no_const;
82345
82346 struct hash_tfm {
82347 int (*init)(struct hash_desc *desc);
82348@@ -647,13 +647,13 @@ struct compress_tfm {
82349 int (*cot_decompress)(struct crypto_tfm *tfm,
82350 const u8 *src, unsigned int slen,
82351 u8 *dst, unsigned int *dlen);
82352-};
82353+} __no_const;
82354
82355 struct rng_tfm {
82356 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
82357 unsigned int dlen);
82358 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
82359-};
82360+} __no_const;
82361
82362 #define crt_ablkcipher crt_u.ablkcipher
82363 #define crt_aead crt_u.aead
82364diff --git a/include/linux/ctype.h b/include/linux/ctype.h
82365index 653589e..4ef254a 100644
82366--- a/include/linux/ctype.h
82367+++ b/include/linux/ctype.h
82368@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
82369 * Fast implementation of tolower() for internal usage. Do not use in your
82370 * code.
82371 */
82372-static inline char _tolower(const char c)
82373+static inline unsigned char _tolower(const unsigned char c)
82374 {
82375 return c | 0x20;
82376 }
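The _tolower() signature change closes a signedness trap: plain char may be signed, so a high-bit byte sign-extends to a negative int before the OR and the negative value leaks to the caller. A small userspace demonstration of the difference:

	#include <stdio.h>

	static unsigned char tolower_fixed(unsigned char c) { return c | 0x20; }
	static char tolower_old(char c) { return c | 0x20; }

	int main(void)
	{
		char raw = (char)0xC4;	/* a high-bit byte, Latin-1 'Ä' */
		printf("old: %d  fixed: %u\n",
		       (int)tolower_old(raw),
		       (unsigned)tolower_fixed((unsigned char)raw));
		return 0;
	}

On a signed-char target the old form prints -28 while the fixed form prints the expected byte value 228 (Latin-1 'ä').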
82377diff --git a/include/linux/dcache.h b/include/linux/dcache.h
82378index d835879..c8e5b92 100644
82379--- a/include/linux/dcache.h
82380+++ b/include/linux/dcache.h
82381@@ -123,6 +123,9 @@ struct dentry {
82382 unsigned long d_time; /* used by d_revalidate */
82383 void *d_fsdata; /* fs-specific data */
82384
82385+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
82386+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
82387+#endif
82388 struct list_head d_lru; /* LRU list */
82389 struct list_head d_child; /* child of parent list */
82390 struct list_head d_subdirs; /* our children */
82391@@ -133,7 +136,7 @@ struct dentry {
82392 struct hlist_node d_alias; /* inode alias list */
82393 struct rcu_head d_rcu;
82394 } d_u;
82395-};
82396+} __randomize_layout;
82397
82398 /*
82399 * dentry->d_lock spinlock nesting subclasses:
82400@@ -319,7 +322,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
82401
82402 static inline unsigned d_count(const struct dentry *dentry)
82403 {
82404- return dentry->d_lockref.count;
82405+ return __lockref_read(&dentry->d_lockref);
82406 }
82407
82408 /*
82409@@ -347,7 +350,7 @@ extern char *dentry_path(struct dentry *, char *, int);
82410 static inline struct dentry *dget_dlock(struct dentry *dentry)
82411 {
82412 if (dentry)
82413- dentry->d_lockref.count++;
82414+ __lockref_inc(&dentry->d_lockref);
82415 return dentry;
82416 }
82417
82418diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
82419index 7925bf0..d5143d2 100644
82420--- a/include/linux/decompress/mm.h
82421+++ b/include/linux/decompress/mm.h
82422@@ -77,7 +77,7 @@ static void free(void *where)
82423 * warnings when not needed (indeed large_malloc / large_free are not
82424 * needed by inflate */
82425
82426-#define malloc(a) kmalloc(a, GFP_KERNEL)
82427+#define malloc(a) kmalloc((a), GFP_KERNEL)
82428 #define free(a) kfree(a)
82429
82430 #define large_malloc(a) vmalloc(a)
82431diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
82432index ce447f0..83c66bd 100644
82433--- a/include/linux/devfreq.h
82434+++ b/include/linux/devfreq.h
82435@@ -114,7 +114,7 @@ struct devfreq_governor {
82436 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
82437 int (*event_handler)(struct devfreq *devfreq,
82438 unsigned int event, void *data);
82439-};
82440+} __do_const;
82441
82442 /**
82443 * struct devfreq - Device devfreq structure
82444diff --git a/include/linux/device.h b/include/linux/device.h
82445index 0eb8ee2..c603b6a 100644
82446--- a/include/linux/device.h
82447+++ b/include/linux/device.h
82448@@ -311,7 +311,7 @@ struct subsys_interface {
82449 struct list_head node;
82450 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
82451 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
82452-};
82453+} __do_const;
82454
82455 int subsys_interface_register(struct subsys_interface *sif);
82456 void subsys_interface_unregister(struct subsys_interface *sif);
82457@@ -507,7 +507,7 @@ struct device_type {
82458 void (*release)(struct device *dev);
82459
82460 const struct dev_pm_ops *pm;
82461-};
82462+} __do_const;
82463
82464 /* interface for exporting device attributes */
82465 struct device_attribute {
82466@@ -517,11 +517,12 @@ struct device_attribute {
82467 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
82468 const char *buf, size_t count);
82469 };
82470+typedef struct device_attribute __no_const device_attribute_no_const;
82471
82472 struct dev_ext_attribute {
82473 struct device_attribute attr;
82474 void *var;
82475-};
82476+} __do_const;
82477
82478 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
82479 char *buf);
82480diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
82481index c3007cb..43efc8c 100644
82482--- a/include/linux/dma-mapping.h
82483+++ b/include/linux/dma-mapping.h
82484@@ -60,7 +60,7 @@ struct dma_map_ops {
82485 u64 (*get_required_mask)(struct device *dev);
82486 #endif
82487 int is_phys;
82488-};
82489+} __do_const;
82490
82491 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
82492
82493diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
82494index b6997a0..108be6c 100644
82495--- a/include/linux/dmaengine.h
82496+++ b/include/linux/dmaengine.h
82497@@ -1133,9 +1133,9 @@ struct dma_pinned_list {
82498 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
82499 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
82500
82501-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
82502+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
82503 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
82504-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
82505+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
82506 struct dma_pinned_list *pinned_list, struct page *page,
82507 unsigned int offset, size_t len);
82508
82509diff --git a/include/linux/efi.h b/include/linux/efi.h
82510index cf7e431..d239dce 100644
82511--- a/include/linux/efi.h
82512+++ b/include/linux/efi.h
82513@@ -1056,6 +1056,7 @@ struct efivar_operations {
82514 efi_set_variable_nonblocking_t *set_variable_nonblocking;
82515 efi_query_variable_store_t *query_variable_store;
82516 };
82517+typedef struct efivar_operations __no_const efivar_operations_no_const;
82518
82519 struct efivars {
82520 /*
82521diff --git a/include/linux/elf.h b/include/linux/elf.h
82522index 20fa8d8..3d0dd18 100644
82523--- a/include/linux/elf.h
82524+++ b/include/linux/elf.h
82525@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
82526 #define elf_note elf32_note
82527 #define elf_addr_t Elf32_Off
82528 #define Elf_Half Elf32_Half
82529+#define elf_dyn Elf32_Dyn
82530
82531 #else
82532
82533@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
82534 #define elf_note elf64_note
82535 #define elf_addr_t Elf64_Off
82536 #define Elf_Half Elf64_Half
82537+#define elf_dyn Elf64_Dyn
82538
82539 #endif
82540
82541diff --git a/include/linux/err.h b/include/linux/err.h
82542index a729120..6ede2c9 100644
82543--- a/include/linux/err.h
82544+++ b/include/linux/err.h
82545@@ -20,12 +20,12 @@
82546
82547 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
82548
82549-static inline void * __must_check ERR_PTR(long error)
82550+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
82551 {
82552 return (void *) error;
82553 }
82554
82555-static inline long __must_check PTR_ERR(__force const void *ptr)
82556+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
82557 {
82558 return (long) ptr;
82559 }
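ERR_PTR()/PTR_ERR() deliberately smuggle a negative errno through the top of the pointer range, so the casts are textbook intentional overflow and the annotation keeps the size_overflow plugin from flagging the round-trip. The usual pattern (foo_create() is illustrative):

	static void *foo_create(int fail)
	{
		if (fail)
			return ERR_PTR(-ENOMEM);	/* pointer value 0xfff...ff4 */
		return kmalloc(32, GFP_KERNEL);
	}

	void *p = foo_create(1);
	if (IS_ERR(p))
		return PTR_ERR(p);			/* back to -ENOMEM as a long */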
82560diff --git a/include/linux/extcon.h b/include/linux/extcon.h
82561index 36f49c4..a2a1f4c 100644
82562--- a/include/linux/extcon.h
82563+++ b/include/linux/extcon.h
82564@@ -135,7 +135,7 @@ struct extcon_dev {
82565 /* /sys/class/extcon/.../mutually_exclusive/... */
82566 struct attribute_group attr_g_muex;
82567 struct attribute **attrs_muex;
82568- struct device_attribute *d_attrs_muex;
82569+ device_attribute_no_const *d_attrs_muex;
82570 };
82571
82572 /**
82573diff --git a/include/linux/fb.h b/include/linux/fb.h
82574index 043f328..180ccbf 100644
82575--- a/include/linux/fb.h
82576+++ b/include/linux/fb.h
82577@@ -305,7 +305,8 @@ struct fb_ops {
82578 /* called at KDB enter and leave time to prepare the console */
82579 int (*fb_debug_enter)(struct fb_info *info);
82580 int (*fb_debug_leave)(struct fb_info *info);
82581-};
82582+} __do_const;
82583+typedef struct fb_ops __no_const fb_ops_no_const;
82584
82585 #ifdef CONFIG_FB_TILEBLITTING
82586 #define FB_TILE_CURSOR_NONE 0
82587diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
82588index 230f87b..1fd0485 100644
82589--- a/include/linux/fdtable.h
82590+++ b/include/linux/fdtable.h
82591@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
82592 void put_files_struct(struct files_struct *fs);
82593 void reset_files_struct(struct files_struct *);
82594 int unshare_files(struct files_struct **);
82595-struct files_struct *dup_fd(struct files_struct *, int *);
82596+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
82597 void do_close_on_exec(struct files_struct *);
82598 int iterate_fd(struct files_struct *, unsigned,
82599 int (*)(const void *, struct file *, unsigned),
82600diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
82601index 8293262..2b3b8bd 100644
82602--- a/include/linux/frontswap.h
82603+++ b/include/linux/frontswap.h
82604@@ -11,7 +11,7 @@ struct frontswap_ops {
82605 int (*load)(unsigned, pgoff_t, struct page *);
82606 void (*invalidate_page)(unsigned, pgoff_t);
82607 void (*invalidate_area)(unsigned);
82608-};
82609+} __no_const;
82610
82611 extern bool frontswap_enabled;
82612 extern struct frontswap_ops *
82613diff --git a/include/linux/fs.h b/include/linux/fs.h
82614index 52cc449..58b25c9 100644
82615--- a/include/linux/fs.h
82616+++ b/include/linux/fs.h
82617@@ -410,7 +410,7 @@ struct address_space {
82618 spinlock_t private_lock; /* for use by the address_space */
82619 struct list_head private_list; /* ditto */
82620 void *private_data; /* ditto */
82621-} __attribute__((aligned(sizeof(long))));
82622+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
82623 /*
82624 * On most architectures that alignment is already the case; but
82625 * must be enforced here for CRIS, to let the least significant bit
82626@@ -453,7 +453,7 @@ struct block_device {
82627 int bd_fsfreeze_count;
82628 /* Mutex for freeze */
82629 struct mutex bd_fsfreeze_mutex;
82630-};
82631+} __randomize_layout;
82632
82633 /*
82634 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
82635@@ -639,7 +639,7 @@ struct inode {
82636 #endif
82637
82638 void *i_private; /* fs or device private pointer */
82639-};
82640+} __randomize_layout;
82641
82642 static inline int inode_unhashed(struct inode *inode)
82643 {
82644@@ -834,7 +834,7 @@ struct file {
82645 struct list_head f_tfile_llink;
82646 #endif /* #ifdef CONFIG_EPOLL */
82647 struct address_space *f_mapping;
82648-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
82649+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
82650
82651 struct file_handle {
82652 __u32 handle_bytes;
82653@@ -962,7 +962,7 @@ struct file_lock {
82654 int state; /* state of grant or error if -ve */
82655 } afs;
82656 } fl_u;
82657-};
82658+} __randomize_layout;
82659
82660 struct file_lock_context {
82661 spinlock_t flc_lock;
82662@@ -1316,7 +1316,7 @@ struct super_block {
82663 * Indicates how deep in a filesystem stack this SB is
82664 */
82665 int s_stack_depth;
82666-};
82667+} __randomize_layout;
82668
82669 extern struct timespec current_fs_time(struct super_block *sb);
82670
82671@@ -1570,7 +1570,8 @@ struct file_operations {
82672 #ifndef CONFIG_MMU
82673 unsigned (*mmap_capabilities)(struct file *);
82674 #endif
82675-};
82676+} __do_const __randomize_layout;
82677+typedef struct file_operations __no_const file_operations_no_const;
82678
82679 struct inode_operations {
82680 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
82681@@ -2918,4 +2919,14 @@ static inline bool dir_relax(struct inode *inode)
82682 return !IS_DEADDIR(inode);
82683 }
82684
82685+static inline bool is_sidechannel_device(const struct inode *inode)
82686+{
82687+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
82688+ umode_t mode = inode->i_mode;
82689+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
82690+#else
82691+ return false;
82692+#endif
82693+}
82694+
82695 #endif /* _LINUX_FS_H */
82696diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
82697index 0efc3e6..fd23610 100644
82698--- a/include/linux/fs_struct.h
82699+++ b/include/linux/fs_struct.h
82700@@ -6,13 +6,13 @@
82701 #include <linux/seqlock.h>
82702
82703 struct fs_struct {
82704- int users;
82705+ atomic_t users;
82706 spinlock_t lock;
82707 seqcount_t seq;
82708 int umask;
82709 int in_exec;
82710 struct path root, pwd;
82711-};
82712+} __randomize_layout;
82713
82714 extern struct kmem_cache *fs_cachep;
82715
82716diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
82717index 7714849..a4a5c7a 100644
82718--- a/include/linux/fscache-cache.h
82719+++ b/include/linux/fscache-cache.h
82720@@ -113,7 +113,7 @@ struct fscache_operation {
82721 fscache_operation_release_t release;
82722 };
82723
82724-extern atomic_t fscache_op_debug_id;
82725+extern atomic_unchecked_t fscache_op_debug_id;
82726 extern void fscache_op_work_func(struct work_struct *work);
82727
82728 extern void fscache_enqueue_operation(struct fscache_operation *);
82729@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
82730 INIT_WORK(&op->work, fscache_op_work_func);
82731 atomic_set(&op->usage, 1);
82732 op->state = FSCACHE_OP_ST_INITIALISED;
82733- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
82734+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
82735 op->processor = processor;
82736 op->release = release;
82737 INIT_LIST_HEAD(&op->pend_link);
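fscache_op_debug_id is a pure debugging cookie, so wraparound is harmless; converting it to atomic_unchecked_t exempts it from PAX_REFCOUNT's overflow detection while every remaining atomic_t keeps the trapping semantics. A sketch of the idea (the real type lives in the per-arch atomic headers; the __sync builtin stands in for them here):

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
	{
		return __sync_add_and_fetch(&v->counter, 1);	/* no overflow trap */
	}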
82738diff --git a/include/linux/fscache.h b/include/linux/fscache.h
82739index 115bb81..e7b812b 100644
82740--- a/include/linux/fscache.h
82741+++ b/include/linux/fscache.h
82742@@ -152,7 +152,7 @@ struct fscache_cookie_def {
82743 * - this is mandatory for any object that may have data
82744 */
82745 void (*now_uncached)(void *cookie_netfs_data);
82746-};
82747+} __do_const;
82748
82749 /*
82750 * fscache cached network filesystem type
82751diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
82752index 7ee1774..72505b8 100644
82753--- a/include/linux/fsnotify.h
82754+++ b/include/linux/fsnotify.h
82755@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
82756 struct inode *inode = file_inode(file);
82757 __u32 mask = FS_ACCESS;
82758
82759+ if (is_sidechannel_device(inode))
82760+ return;
82761+
82762 if (S_ISDIR(inode->i_mode))
82763 mask |= FS_ISDIR;
82764
82765@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
82766 struct inode *inode = file_inode(file);
82767 __u32 mask = FS_MODIFY;
82768
82769+ if (is_sidechannel_device(inode))
82770+ return;
82771+
82772 if (S_ISDIR(inode->i_mode))
82773 mask |= FS_ISDIR;
82774
82775@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
82776 */
82777 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
82778 {
82779- return kstrdup(name, GFP_KERNEL);
82780+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
82781 }
82782
82783 /*
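These two hunks pair with the is_sidechannel_device() helper added to fs.h above: for a device node that is world-readable or world-writable, fsnotify_access()/fsnotify_modify() now return before generating an event, so an unprivileged inotify/fanotify watcher can no longer observe when other users touch a shared device such as a tty. What the predicate matches, concretely:

	umode_t mode = S_IFCHR | 0666;	/* world-accessible character device */
	/* (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)) -> true */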
82784diff --git a/include/linux/genhd.h b/include/linux/genhd.h
82785index ec274e0..e678159 100644
82786--- a/include/linux/genhd.h
82787+++ b/include/linux/genhd.h
82788@@ -194,7 +194,7 @@ struct gendisk {
82789 struct kobject *slave_dir;
82790
82791 struct timer_rand_state *random;
82792- atomic_t sync_io; /* RAID */
82793+ atomic_unchecked_t sync_io; /* RAID */
82794 struct disk_events *ev;
82795 #ifdef CONFIG_BLK_DEV_INTEGRITY
82796 struct blk_integrity *integrity;
82797@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
82798 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
82799
82800 /* drivers/char/random.c */
82801-extern void add_disk_randomness(struct gendisk *disk);
82802+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
82803 extern void rand_initialize_disk(struct gendisk *disk);
82804
82805 static inline sector_t get_start_sect(struct block_device *bdev)
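add_disk_randomness() here, like dup_fd() in the fdtable.h hunk earlier, gains __latent_entropy, the hook for PaX's latent_entropy GCC plugin: functions carrying the attribute have plugin-generated values mixed into a global entropy word as their basic blocks execute, cheaply seeding the pool early in boot. The marker follows the usual degrade-to-nothing pattern (sketch):

	#ifdef LATENT_ENTROPY_PLUGIN
	#define __latent_entropy __attribute__((latent_entropy))
	#else
	#define __latent_entropy
	#endif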
82806diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
82807index 667c311..abac2a7 100644
82808--- a/include/linux/genl_magic_func.h
82809+++ b/include/linux/genl_magic_func.h
82810@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
82811 },
82812
82813 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
82814-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
82815+static struct genl_ops ZZZ_genl_ops[] = {
82816 #include GENL_MAGIC_INCLUDE_FILE
82817 };
82818
82819diff --git a/include/linux/gfp.h b/include/linux/gfp.h
82820index 51bd1e7..0486343 100644
82821--- a/include/linux/gfp.h
82822+++ b/include/linux/gfp.h
82823@@ -34,6 +34,13 @@ struct vm_area_struct;
82824 #define ___GFP_NO_KSWAPD 0x400000u
82825 #define ___GFP_OTHER_NODE 0x800000u
82826 #define ___GFP_WRITE 0x1000000u
82827+
82828+#ifdef CONFIG_PAX_USERCOPY_SLABS
82829+#define ___GFP_USERCOPY 0x2000000u
82830+#else
82831+#define ___GFP_USERCOPY 0
82832+#endif
82833+
82834 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
82835
82836 /*
82837@@ -90,6 +97,7 @@ struct vm_area_struct;
82838 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
82839 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
82840 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
82841+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
82842
82843 /*
82844 * This may seem redundant, but it's a way of annotating false positives vs.
82845@@ -97,7 +105,7 @@ struct vm_area_struct;
82846 */
82847 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
82848
82849-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
82850+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
82851 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
82852
82853 /* This equals 0, but use constants in case they ever change */
82854@@ -152,6 +160,8 @@ struct vm_area_struct;
82855 /* 4GB DMA on some platforms */
82856 #define GFP_DMA32 __GFP_DMA32
82857
82858+#define GFP_USERCOPY __GFP_USERCOPY
82859+
82860 /* Convert GFP flags to their corresponding migrate type */
82861 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
82862 {
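The arithmetic behind the gfp.h hunk: ___GFP_WRITE at 0x1000000 occupies bit 24, the previous highest, so the new flag claims bit 25 and the count of used bits rises from 25 to 26:

	___GFP_USERCOPY == 0x2000000u == 1u << 25	/* bit 25, newly claimed */
	__GFP_BITS_SHIFT == 26				/* bits 0..25 now in use */
	__GFP_BITS_MASK  == (1 << 26) - 1 == 0x3ffffff

Allocations tagged GFP_USERCOPY are routed to dedicated slab caches so that PAX_USERCOPY_SLABS can whitelist user copies into them.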
82863diff --git a/include/linux/gracl.h b/include/linux/gracl.h
82864new file mode 100644
82865index 0000000..91858e4
82866--- /dev/null
82867+++ b/include/linux/gracl.h
82868@@ -0,0 +1,342 @@
82869+#ifndef GR_ACL_H
82870+#define GR_ACL_H
82871+
82872+#include <linux/grdefs.h>
82873+#include <linux/resource.h>
82874+#include <linux/capability.h>
82875+#include <linux/dcache.h>
82876+#include <asm/resource.h>
82877+
82878+/* Major status information */
82879+
82880+#define GR_VERSION "grsecurity 3.1"
82881+#define GRSECURITY_VERSION 0x3100
82882+
82883+enum {
82884+ GR_SHUTDOWN = 0,
82885+ GR_ENABLE = 1,
82886+ GR_SPROLE = 2,
82887+ GR_OLDRELOAD = 3,
82888+ GR_SEGVMOD = 4,
82889+ GR_STATUS = 5,
82890+ GR_UNSPROLE = 6,
82891+ GR_PASSSET = 7,
82892+ GR_SPROLEPAM = 8,
82893+ GR_RELOAD = 9,
82894+};
82895+
82896+/* Password setup definitions
82897+ * kernel/grhash.c */
82898+enum {
82899+ GR_PW_LEN = 128,
82900+ GR_SALT_LEN = 16,
82901+ GR_SHA_LEN = 32,
82902+};
82903+
82904+enum {
82905+ GR_SPROLE_LEN = 64,
82906+};
82907+
82908+enum {
82909+ GR_NO_GLOB = 0,
82910+ GR_REG_GLOB,
82911+ GR_CREATE_GLOB
82912+};
82913+
82914+#define GR_NLIMITS 32
82915+
82916+/* Begin Data Structures */
82917+
82918+struct sprole_pw {
82919+ unsigned char *rolename;
82920+ unsigned char salt[GR_SALT_LEN];
82921+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
82922+};
82923+
82924+struct name_entry {
82925+ __u32 key;
82926+ u64 inode;
82927+ dev_t device;
82928+ char *name;
82929+ __u16 len;
82930+ __u8 deleted;
82931+ struct name_entry *prev;
82932+ struct name_entry *next;
82933+};
82934+
82935+struct inodev_entry {
82936+ struct name_entry *nentry;
82937+ struct inodev_entry *prev;
82938+ struct inodev_entry *next;
82939+};
82940+
82941+struct acl_role_db {
82942+ struct acl_role_label **r_hash;
82943+ __u32 r_size;
82944+};
82945+
82946+struct inodev_db {
82947+ struct inodev_entry **i_hash;
82948+ __u32 i_size;
82949+};
82950+
82951+struct name_db {
82952+ struct name_entry **n_hash;
82953+ __u32 n_size;
82954+};
82955+
82956+struct crash_uid {
82957+ uid_t uid;
82958+ unsigned long expires;
82959+};
82960+
82961+struct gr_hash_struct {
82962+ void **table;
82963+ void **nametable;
82964+ void *first;
82965+ __u32 table_size;
82966+ __u32 used_size;
82967+ int type;
82968+};
82969+
82970+/* Userspace Grsecurity ACL data structures */
82971+
82972+struct acl_subject_label {
82973+ char *filename;
82974+ u64 inode;
82975+ dev_t device;
82976+ __u32 mode;
82977+ kernel_cap_t cap_mask;
82978+ kernel_cap_t cap_lower;
82979+ kernel_cap_t cap_invert_audit;
82980+
82981+ struct rlimit res[GR_NLIMITS];
82982+ __u32 resmask;
82983+
82984+ __u8 user_trans_type;
82985+ __u8 group_trans_type;
82986+ uid_t *user_transitions;
82987+ gid_t *group_transitions;
82988+ __u16 user_trans_num;
82989+ __u16 group_trans_num;
82990+
82991+ __u32 sock_families[2];
82992+ __u32 ip_proto[8];
82993+ __u32 ip_type;
82994+ struct acl_ip_label **ips;
82995+ __u32 ip_num;
82996+ __u32 inaddr_any_override;
82997+
82998+ __u32 crashes;
82999+ unsigned long expires;
83000+
83001+ struct acl_subject_label *parent_subject;
83002+ struct gr_hash_struct *hash;
83003+ struct acl_subject_label *prev;
83004+ struct acl_subject_label *next;
83005+
83006+ struct acl_object_label **obj_hash;
83007+ __u32 obj_hash_size;
83008+ __u16 pax_flags;
83009+};
83010+
83011+struct role_allowed_ip {
83012+ __u32 addr;
83013+ __u32 netmask;
83014+
83015+ struct role_allowed_ip *prev;
83016+ struct role_allowed_ip *next;
83017+};
83018+
83019+struct role_transition {
83020+ char *rolename;
83021+
83022+ struct role_transition *prev;
83023+ struct role_transition *next;
83024+};
83025+
83026+struct acl_role_label {
83027+ char *rolename;
83028+ uid_t uidgid;
83029+ __u16 roletype;
83030+
83031+ __u16 auth_attempts;
83032+ unsigned long expires;
83033+
83034+ struct acl_subject_label *root_label;
83035+ struct gr_hash_struct *hash;
83036+
83037+ struct acl_role_label *prev;
83038+ struct acl_role_label *next;
83039+
83040+ struct role_transition *transitions;
83041+ struct role_allowed_ip *allowed_ips;
83042+ uid_t *domain_children;
83043+ __u16 domain_child_num;
83044+
83045+ umode_t umask;
83046+
83047+ struct acl_subject_label **subj_hash;
83048+ __u32 subj_hash_size;
83049+};
83050+
83051+struct user_acl_role_db {
83052+ struct acl_role_label **r_table;
83053+ __u32 num_pointers; /* Number of allocations to track */
83054+ __u32 num_roles; /* Number of roles */
83055+ __u32 num_domain_children; /* Number of domain children */
83056+ __u32 num_subjects; /* Number of subjects */
83057+ __u32 num_objects; /* Number of objects */
83058+};
83059+
83060+struct acl_object_label {
83061+ char *filename;
83062+ u64 inode;
83063+ dev_t device;
83064+ __u32 mode;
83065+
83066+ struct acl_subject_label *nested;
83067+ struct acl_object_label *globbed;
83068+
83069+ /* next two structures not used */
83070+
83071+ struct acl_object_label *prev;
83072+ struct acl_object_label *next;
83073+};
83074+
83075+struct acl_ip_label {
83076+ char *iface;
83077+ __u32 addr;
83078+ __u32 netmask;
83079+ __u16 low, high;
83080+ __u8 mode;
83081+ __u32 type;
83082+ __u32 proto[8];
83083+
83084+ /* next two structures not used */
83085+
83086+ struct acl_ip_label *prev;
83087+ struct acl_ip_label *next;
83088+};
83089+
83090+struct gr_arg {
83091+ struct user_acl_role_db role_db;
83092+ unsigned char pw[GR_PW_LEN];
83093+ unsigned char salt[GR_SALT_LEN];
83094+ unsigned char sum[GR_SHA_LEN];
83095+ unsigned char sp_role[GR_SPROLE_LEN];
83096+ struct sprole_pw *sprole_pws;
83097+ dev_t segv_device;
83098+ u64 segv_inode;
83099+ uid_t segv_uid;
83100+ __u16 num_sprole_pws;
83101+ __u16 mode;
83102+};
83103+
83104+struct gr_arg_wrapper {
83105+ struct gr_arg *arg;
83106+ __u32 version;
83107+ __u32 size;
83108+};
83109+
83110+struct subject_map {
83111+ struct acl_subject_label *user;
83112+ struct acl_subject_label *kernel;
83113+ struct subject_map *prev;
83114+ struct subject_map *next;
83115+};
83116+
83117+struct acl_subj_map_db {
83118+ struct subject_map **s_hash;
83119+ __u32 s_size;
83120+};
83121+
83122+struct gr_policy_state {
83123+ struct sprole_pw **acl_special_roles;
83124+ __u16 num_sprole_pws;
83125+ struct acl_role_label *kernel_role;
83126+ struct acl_role_label *role_list;
83127+ struct acl_role_label *default_role;
83128+ struct acl_role_db acl_role_set;
83129+ struct acl_subj_map_db subj_map_set;
83130+ struct name_db name_set;
83131+ struct inodev_db inodev_set;
83132+};
83133+
83134+struct gr_alloc_state {
83135+ unsigned long alloc_stack_next;
83136+ unsigned long alloc_stack_size;
83137+ void **alloc_stack;
83138+};
83139+
83140+struct gr_reload_state {
83141+ struct gr_policy_state oldpolicy;
83142+ struct gr_alloc_state oldalloc;
83143+ struct gr_policy_state newpolicy;
83144+ struct gr_alloc_state newalloc;
83145+ struct gr_policy_state *oldpolicy_ptr;
83146+ struct gr_alloc_state *oldalloc_ptr;
83147+ unsigned char oldmode;
83148+};
83149+
83150+/* End Data Structures Section */
83151+
83152+/* Hash functions generated by empirical testing by Brad Spengler.
83153+ Makes good use of the low bits of the inode. Generally 0-1 times
83154+ in loop for a successful match, 0-3 for an unsuccessful match.
83155+ Shift/add algorithm with modulus of table size and an XOR. */
83156+
83157+static __inline__ unsigned int
83158+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
83159+{
83160+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
83161+}
83162+
83163+static __inline__ unsigned int
83164+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
83165+{
83166+ return ((const unsigned long)userp % sz);
83167+}
83168+
83169+static __inline__ unsigned int
83170+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
83171+{
83172+ unsigned int rem;
83173+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
83174+ return rem;
83175+}
83176+
83177+static __inline__ unsigned int
83178+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
83179+{
83180+ return full_name_hash((const unsigned char *)name, len) % sz;
83181+}
83182+
83183+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
83184+ subj = NULL; \
83185+ iter = 0; \
83186+ while (iter < role->subj_hash_size) { \
83187+ if (subj == NULL) \
83188+ subj = role->subj_hash[iter]; \
83189+ if (subj == NULL) { \
83190+ iter++; \
83191+ continue; \
83192+ }
83193+
83194+#define FOR_EACH_SUBJECT_END(subj,iter) \
83195+ subj = subj->next; \
83196+ if (subj == NULL) \
83197+ iter++; \
83198+ }
83199+
83200+
83201+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
83202+ subj = role->hash->first; \
83203+ while (subj != NULL) {
83204+
83205+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
83206+ subj = subj->next; \
83207+ }
83208+
83209+#endif
83210+
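The FOR_EACH_SUBJECT iteration macros at the bottom of gracl.h expand to an open-coded walk over a role's chained subject hash table: the outer index steps through buckets while the inner pointer follows each bucket's next chain. Intended usage (handle_subject() is illustrative):

	struct acl_subject_label *subj;
	unsigned int iter;

	FOR_EACH_SUBJECT_START(role, subj, iter)
		handle_subject(subj);		/* runs once per subject */
	FOR_EACH_SUBJECT_END(subj, iter)

Note the macros deliberately leave the braces unbalanced across START/END, so the pair must always be used together.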
83211diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
83212new file mode 100644
83213index 0000000..af64092
83214--- /dev/null
83215+++ b/include/linux/gracl_compat.h
83216@@ -0,0 +1,156 @@
83217+#ifndef GR_ACL_COMPAT_H
83218+#define GR_ACL_COMPAT_H
83219+
83220+#include <linux/resource.h>
83221+#include <asm/resource.h>
83222+
83223+struct sprole_pw_compat {
83224+ compat_uptr_t rolename;
83225+ unsigned char salt[GR_SALT_LEN];
83226+ unsigned char sum[GR_SHA_LEN];
83227+};
83228+
83229+struct gr_hash_struct_compat {
83230+ compat_uptr_t table;
83231+ compat_uptr_t nametable;
83232+ compat_uptr_t first;
83233+ __u32 table_size;
83234+ __u32 used_size;
83235+ int type;
83236+};
83237+
83238+struct acl_subject_label_compat {
83239+ compat_uptr_t filename;
83240+ compat_u64 inode;
83241+ __u32 device;
83242+ __u32 mode;
83243+ kernel_cap_t cap_mask;
83244+ kernel_cap_t cap_lower;
83245+ kernel_cap_t cap_invert_audit;
83246+
83247+ struct compat_rlimit res[GR_NLIMITS];
83248+ __u32 resmask;
83249+
83250+ __u8 user_trans_type;
83251+ __u8 group_trans_type;
83252+ compat_uptr_t user_transitions;
83253+ compat_uptr_t group_transitions;
83254+ __u16 user_trans_num;
83255+ __u16 group_trans_num;
83256+
83257+ __u32 sock_families[2];
83258+ __u32 ip_proto[8];
83259+ __u32 ip_type;
83260+ compat_uptr_t ips;
83261+ __u32 ip_num;
83262+ __u32 inaddr_any_override;
83263+
83264+ __u32 crashes;
83265+ compat_ulong_t expires;
83266+
83267+ compat_uptr_t parent_subject;
83268+ compat_uptr_t hash;
83269+ compat_uptr_t prev;
83270+ compat_uptr_t next;
83271+
83272+ compat_uptr_t obj_hash;
83273+ __u32 obj_hash_size;
83274+ __u16 pax_flags;
83275+};
83276+
83277+struct role_allowed_ip_compat {
83278+ __u32 addr;
83279+ __u32 netmask;
83280+
83281+ compat_uptr_t prev;
83282+ compat_uptr_t next;
83283+};
83284+
83285+struct role_transition_compat {
83286+ compat_uptr_t rolename;
83287+
83288+ compat_uptr_t prev;
83289+ compat_uptr_t next;
83290+};
83291+
83292+struct acl_role_label_compat {
83293+ compat_uptr_t rolename;
83294+ uid_t uidgid;
83295+ __u16 roletype;
83296+
83297+ __u16 auth_attempts;
83298+ compat_ulong_t expires;
83299+
83300+ compat_uptr_t root_label;
83301+ compat_uptr_t hash;
83302+
83303+ compat_uptr_t prev;
83304+ compat_uptr_t next;
83305+
83306+ compat_uptr_t transitions;
83307+ compat_uptr_t allowed_ips;
83308+ compat_uptr_t domain_children;
83309+ __u16 domain_child_num;
83310+
83311+ umode_t umask;
83312+
83313+ compat_uptr_t subj_hash;
83314+ __u32 subj_hash_size;
83315+};
83316+
83317+struct user_acl_role_db_compat {
83318+ compat_uptr_t r_table;
83319+ __u32 num_pointers;
83320+ __u32 num_roles;
83321+ __u32 num_domain_children;
83322+ __u32 num_subjects;
83323+ __u32 num_objects;
83324+};
83325+
83326+struct acl_object_label_compat {
83327+ compat_uptr_t filename;
83328+ compat_u64 inode;
83329+ __u32 device;
83330+ __u32 mode;
83331+
83332+ compat_uptr_t nested;
83333+ compat_uptr_t globbed;
83334+
83335+ compat_uptr_t prev;
83336+ compat_uptr_t next;
83337+};
83338+
83339+struct acl_ip_label_compat {
83340+ compat_uptr_t iface;
83341+ __u32 addr;
83342+ __u32 netmask;
83343+ __u16 low, high;
83344+ __u8 mode;
83345+ __u32 type;
83346+ __u32 proto[8];
83347+
83348+ compat_uptr_t prev;
83349+ compat_uptr_t next;
83350+};
83351+
83352+struct gr_arg_compat {
83353+ struct user_acl_role_db_compat role_db;
83354+ unsigned char pw[GR_PW_LEN];
83355+ unsigned char salt[GR_SALT_LEN];
83356+ unsigned char sum[GR_SHA_LEN];
83357+ unsigned char sp_role[GR_SPROLE_LEN];
83358+ compat_uptr_t sprole_pws;
83359+ __u32 segv_device;
83360+ compat_u64 segv_inode;
83361+ uid_t segv_uid;
83362+ __u16 num_sprole_pws;
83363+ __u16 mode;
83364+};
83365+
83366+struct gr_arg_wrapper_compat {
83367+ compat_uptr_t arg;
83368+ __u32 version;
83369+ __u32 size;
83370+};
83371+
83372+#endif
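Every pointer in the native gracl.h structures is replaced by compat_uptr_t in these mirrors so a 32-bit gradm can drive a 64-bit kernel: the layouts match what 32-bit userspace actually wrote, and the kernel widens each handle explicitly. The standard idiom the mirrors exist for (sketch, error handling trimmed):

	struct gr_arg_wrapper_compat wcompat;
	struct gr_arg __user *uarg;

	if (copy_from_user(&wcompat, buf, sizeof(wcompat)))
		return -EFAULT;
	uarg = compat_ptr(wcompat.arg);	/* compat_uptr_t -> native user pointer */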
83373diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
83374new file mode 100644
83375index 0000000..323ecf2
83376--- /dev/null
83377+++ b/include/linux/gralloc.h
83378@@ -0,0 +1,9 @@
83379+#ifndef __GRALLOC_H
83380+#define __GRALLOC_H
83381+
83382+void acl_free_all(void);
83383+int acl_alloc_stack_init(unsigned long size);
83384+void *acl_alloc(unsigned long len);
83385+void *acl_alloc_num(unsigned long num, unsigned long len);
83386+
83387+#endif
83388diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
83389new file mode 100644
83390index 0000000..be66033
83391--- /dev/null
83392+++ b/include/linux/grdefs.h
83393@@ -0,0 +1,140 @@
83394+#ifndef GRDEFS_H
83395+#define GRDEFS_H
83396+
83397+/* Begin grsecurity status declarations */
83398+
83399+enum {
83400+ GR_READY = 0x01,
83401+ GR_STATUS_INIT = 0x00 // disabled state
83402+};
83403+
83404+/* Begin ACL declarations */
83405+
83406+/* Role flags */
83407+
83408+enum {
83409+ GR_ROLE_USER = 0x0001,
83410+ GR_ROLE_GROUP = 0x0002,
83411+ GR_ROLE_DEFAULT = 0x0004,
83412+ GR_ROLE_SPECIAL = 0x0008,
83413+ GR_ROLE_AUTH = 0x0010,
83414+ GR_ROLE_NOPW = 0x0020,
83415+ GR_ROLE_GOD = 0x0040,
83416+ GR_ROLE_LEARN = 0x0080,
83417+ GR_ROLE_TPE = 0x0100,
83418+ GR_ROLE_DOMAIN = 0x0200,
83419+ GR_ROLE_PAM = 0x0400,
83420+ GR_ROLE_PERSIST = 0x0800
83421+};
83422+
83423+/* ACL Subject and Object mode flags */
83424+enum {
83425+ GR_DELETED = 0x80000000
83426+};
83427+
83428+/* ACL Object-only mode flags */
83429+enum {
83430+ GR_READ = 0x00000001,
83431+ GR_APPEND = 0x00000002,
83432+ GR_WRITE = 0x00000004,
83433+ GR_EXEC = 0x00000008,
83434+ GR_FIND = 0x00000010,
83435+ GR_INHERIT = 0x00000020,
83436+ GR_SETID = 0x00000040,
83437+ GR_CREATE = 0x00000080,
83438+ GR_DELETE = 0x00000100,
83439+ GR_LINK = 0x00000200,
83440+ GR_AUDIT_READ = 0x00000400,
83441+ GR_AUDIT_APPEND = 0x00000800,
83442+ GR_AUDIT_WRITE = 0x00001000,
83443+ GR_AUDIT_EXEC = 0x00002000,
83444+ GR_AUDIT_FIND = 0x00004000,
83445+ GR_AUDIT_INHERIT= 0x00008000,
83446+ GR_AUDIT_SETID = 0x00010000,
83447+ GR_AUDIT_CREATE = 0x00020000,
83448+ GR_AUDIT_DELETE = 0x00040000,
83449+ GR_AUDIT_LINK = 0x00080000,
83450+ GR_PTRACERD = 0x00100000,
83451+ GR_NOPTRACE = 0x00200000,
83452+ GR_SUPPRESS = 0x00400000,
83453+ GR_NOLEARN = 0x00800000,
83454+ GR_INIT_TRANSFER= 0x01000000
83455+};
83456+
83457+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
83458+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
83459+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
83460+
83461+/* ACL subject-only mode flags */
83462+enum {
83463+ GR_KILL = 0x00000001,
83464+ GR_VIEW = 0x00000002,
83465+ GR_PROTECTED = 0x00000004,
83466+ GR_LEARN = 0x00000008,
83467+ GR_OVERRIDE = 0x00000010,
83468+ /* just a placeholder, this mode is only used in userspace */
83469+ GR_DUMMY = 0x00000020,
83470+ GR_PROTSHM = 0x00000040,
83471+ GR_KILLPROC = 0x00000080,
83472+ GR_KILLIPPROC = 0x00000100,
83473+ /* just a placeholder, this mode is only used in userspace */
83474+ GR_NOTROJAN = 0x00000200,
83475+ GR_PROTPROCFD = 0x00000400,
83476+ GR_PROCACCT = 0x00000800,
83477+ GR_RELAXPTRACE = 0x00001000,
83478+ //GR_NESTED = 0x00002000,
83479+ GR_INHERITLEARN = 0x00004000,
83480+ GR_PROCFIND = 0x00008000,
83481+ GR_POVERRIDE = 0x00010000,
83482+ GR_KERNELAUTH = 0x00020000,
83483+ GR_ATSECURE = 0x00040000,
83484+ GR_SHMEXEC = 0x00080000
83485+};
83486+
83487+enum {
83488+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
83489+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
83490+ GR_PAX_ENABLE_MPROTECT = 0x0004,
83491+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
83492+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
83493+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
83494+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
83495+ GR_PAX_DISABLE_MPROTECT = 0x0400,
83496+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
83497+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
83498+};
83499+
83500+enum {
83501+ GR_ID_USER = 0x01,
83502+ GR_ID_GROUP = 0x02,
83503+};
83504+
83505+enum {
83506+ GR_ID_ALLOW = 0x01,
83507+ GR_ID_DENY = 0x02,
83508+};
83509+
83510+#define GR_CRASH_RES 31
83511+#define GR_UIDTABLE_MAX 500
83512+
83513+/* begin resource learning section */
83514+enum {
83515+ GR_RLIM_CPU_BUMP = 60,
83516+ GR_RLIM_FSIZE_BUMP = 50000,
83517+ GR_RLIM_DATA_BUMP = 10000,
83518+ GR_RLIM_STACK_BUMP = 1000,
83519+ GR_RLIM_CORE_BUMP = 10000,
83520+ GR_RLIM_RSS_BUMP = 500000,
83521+ GR_RLIM_NPROC_BUMP = 1,
83522+ GR_RLIM_NOFILE_BUMP = 5,
83523+ GR_RLIM_MEMLOCK_BUMP = 50000,
83524+ GR_RLIM_AS_BUMP = 500000,
83525+ GR_RLIM_LOCKS_BUMP = 2,
83526+ GR_RLIM_SIGPENDING_BUMP = 5,
83527+ GR_RLIM_MSGQUEUE_BUMP = 10000,
83528+ GR_RLIM_NICE_BUMP = 1,
83529+ GR_RLIM_RTPRIO_BUMP = 1,
83530+ GR_RLIM_RTTIME_BUMP = 1000000
83531+};
83532+
83533+#endif
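All of the mode enums above are single-bit masks, so policy evaluation reduces to bitwise tests; a sketch of the shape such checks take (reqmode and obj are illustrative):

	__u32 reqmode = GR_READ | GR_WRITE;

	if ((obj->mode & reqmode) != reqmode)
		return 0;		/* some requested access bit denied */
	if (obj->mode & GR_AUDITS)
		;			/* any audit bit set: log the access */

GR_AUDITS, defined above, simply ORs the ten GR_AUDIT_* object bits together so a single test covers them all.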
83534diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
83535new file mode 100644
83536index 0000000..fb1de5d
83537--- /dev/null
83538+++ b/include/linux/grinternal.h
83539@@ -0,0 +1,230 @@
83540+#ifndef __GRINTERNAL_H
83541+#define __GRINTERNAL_H
83542+
83543+#ifdef CONFIG_GRKERNSEC
83544+
83545+#include <linux/fs.h>
83546+#include <linux/mnt_namespace.h>
83547+#include <linux/nsproxy.h>
83548+#include <linux/gracl.h>
83549+#include <linux/grdefs.h>
83550+#include <linux/grmsg.h>
83551+
83552+void gr_add_learn_entry(const char *fmt, ...)
83553+ __attribute__ ((format (printf, 1, 2)));
83554+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
83555+ const struct vfsmount *mnt);
83556+__u32 gr_check_create(const struct dentry *new_dentry,
83557+ const struct dentry *parent,
83558+ const struct vfsmount *mnt, const __u32 mode);
83559+int gr_check_protected_task(const struct task_struct *task);
83560+__u32 to_gr_audit(const __u32 reqmode);
83561+int gr_set_acls(const int type);
83562+int gr_acl_is_enabled(void);
83563+char gr_roletype_to_char(void);
83564+
83565+void gr_handle_alertkill(struct task_struct *task);
83566+char *gr_to_filename(const struct dentry *dentry,
83567+ const struct vfsmount *mnt);
83568+char *gr_to_filename1(const struct dentry *dentry,
83569+ const struct vfsmount *mnt);
83570+char *gr_to_filename2(const struct dentry *dentry,
83571+ const struct vfsmount *mnt);
83572+char *gr_to_filename3(const struct dentry *dentry,
83573+ const struct vfsmount *mnt);
83574+
83575+extern int grsec_enable_ptrace_readexec;
83576+extern int grsec_enable_harden_ptrace;
83577+extern int grsec_enable_link;
83578+extern int grsec_enable_fifo;
83579+extern int grsec_enable_execve;
83580+extern int grsec_enable_shm;
83581+extern int grsec_enable_execlog;
83582+extern int grsec_enable_signal;
83583+extern int grsec_enable_audit_ptrace;
83584+extern int grsec_enable_forkfail;
83585+extern int grsec_enable_time;
83586+extern int grsec_enable_rofs;
83587+extern int grsec_deny_new_usb;
83588+extern int grsec_enable_chroot_shmat;
83589+extern int grsec_enable_chroot_mount;
83590+extern int grsec_enable_chroot_double;
83591+extern int grsec_enable_chroot_pivot;
83592+extern int grsec_enable_chroot_chdir;
83593+extern int grsec_enable_chroot_chmod;
83594+extern int grsec_enable_chroot_mknod;
83595+extern int grsec_enable_chroot_fchdir;
83596+extern int grsec_enable_chroot_nice;
83597+extern int grsec_enable_chroot_execlog;
83598+extern int grsec_enable_chroot_caps;
83599+extern int grsec_enable_chroot_rename;
83600+extern int grsec_enable_chroot_sysctl;
83601+extern int grsec_enable_chroot_unix;
83602+extern int grsec_enable_symlinkown;
83603+extern kgid_t grsec_symlinkown_gid;
83604+extern int grsec_enable_tpe;
83605+extern kgid_t grsec_tpe_gid;
83606+extern int grsec_enable_tpe_all;
83607+extern int grsec_enable_tpe_invert;
83608+extern int grsec_enable_socket_all;
83609+extern kgid_t grsec_socket_all_gid;
83610+extern int grsec_enable_socket_client;
83611+extern kgid_t grsec_socket_client_gid;
83612+extern int grsec_enable_socket_server;
83613+extern kgid_t grsec_socket_server_gid;
83614+extern kgid_t grsec_audit_gid;
83615+extern int grsec_enable_group;
83616+extern int grsec_enable_log_rwxmaps;
83617+extern int grsec_enable_mount;
83618+extern int grsec_enable_chdir;
83619+extern int grsec_resource_logging;
83620+extern int grsec_enable_blackhole;
83621+extern int grsec_lastack_retries;
83622+extern int grsec_enable_brute;
83623+extern int grsec_enable_harden_ipc;
83624+extern int grsec_lock;
83625+
83626+extern spinlock_t grsec_alert_lock;
83627+extern unsigned long grsec_alert_wtime;
83628+extern unsigned long grsec_alert_fyet;
83629+
83630+extern spinlock_t grsec_audit_lock;
83631+
83632+extern rwlock_t grsec_exec_file_lock;
83633+
83634+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
83635+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
83636+ (tsk)->exec_file->f_path.mnt) : "/")
83637+
83638+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
83639+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
83640+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83641+
83642+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
83643+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
83644+ (tsk)->exec_file->f_path.mnt) : "/")
83645+
83646+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
83647+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
83648+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
83649+
83650+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
83651+
83652+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
83653+
83654+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
83655+{
83656+ if (file1 && file2) {
83657+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
83658+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
83659+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
83660+ return true;
83661+ }
83662+
83663+ return false;
83664+}
83665+
83666+#define GR_CHROOT_CAPS {{ \
83667+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
83668+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
83669+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
83670+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
83671+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
83672+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
83673+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
83674+
83675+#define security_learn(normal_msg,args...) \
83676+({ \
83677+ read_lock(&grsec_exec_file_lock); \
83678+ gr_add_learn_entry(normal_msg "\n", ## args); \
83679+ read_unlock(&grsec_exec_file_lock); \
83680+})
83681+
83682+enum {
83683+ GR_DO_AUDIT,
83684+ GR_DONT_AUDIT,
83685+ /* used for non-audit messages that we shouldn't kill the task on */
83686+ GR_DONT_AUDIT_GOOD
83687+};
83688+
83689+enum {
83690+ GR_TTYSNIFF,
83691+ GR_RBAC,
83692+ GR_RBAC_STR,
83693+ GR_STR_RBAC,
83694+ GR_RBAC_MODE2,
83695+ GR_RBAC_MODE3,
83696+ GR_FILENAME,
83697+ GR_SYSCTL_HIDDEN,
83698+ GR_NOARGS,
83699+ GR_ONE_INT,
83700+ GR_ONE_INT_TWO_STR,
83701+ GR_ONE_STR,
83702+ GR_STR_INT,
83703+ GR_TWO_STR_INT,
83704+ GR_TWO_INT,
83705+ GR_TWO_U64,
83706+ GR_THREE_INT,
83707+ GR_FIVE_INT_TWO_STR,
83708+ GR_TWO_STR,
83709+ GR_THREE_STR,
83710+ GR_FOUR_STR,
83711+ GR_STR_FILENAME,
83712+ GR_FILENAME_STR,
83713+ GR_FILENAME_TWO_INT,
83714+ GR_FILENAME_TWO_INT_STR,
83715+ GR_TEXTREL,
83716+ GR_PTRACE,
83717+ GR_RESOURCE,
83718+ GR_CAP,
83719+ GR_SIG,
83720+ GR_SIG2,
83721+ GR_CRASH1,
83722+ GR_CRASH2,
83723+ GR_PSACCT,
83724+ GR_RWXMAP,
83725+ GR_RWXMAPVMA
83726+};
83727+
83728+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
83729+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
83730+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
83731+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
83732+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
83733+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
83734+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
83735+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
83736+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
83737+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
83738+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
83739+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
83740+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
83741+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
83742+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
83743+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
83744+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
83745+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
83746+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
83747+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
83748+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
83749+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
83750+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
83751+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
83752+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
83753+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
83754+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
83755+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
83756+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
83757+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
83758+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
83759+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
83760+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
83761+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
83762+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
83763+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
83764+
83765+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
83766+
83767+#endif
83768+
83769+#endif
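The gr_log_* wrappers at the end of grinternal.h all funnel into a single varargs sink, with the enum constant pinning the argument layout the formatter must expect; for example (arguments illustrative):

	gr_log_str_int(GR_DONT_AUDIT, msg, name, uid);
	/* expands to */
	gr_log_varargs(GR_DONT_AUDIT, msg, GR_STR_INT, name, uid);

so gr_log_varargs() can switch on the argtypes discriminator and pull exactly one string and one int off its va_list, instead of trusting a caller-supplied format.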
83770diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
83771new file mode 100644
83772index 0000000..26ef560
83773--- /dev/null
83774+++ b/include/linux/grmsg.h
83775@@ -0,0 +1,118 @@
83776+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
83777+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
83778+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
83779+#define GR_STOPMOD_MSG "denied modification of module state by "
83780+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
83781+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
83782+#define GR_IOPERM_MSG "denied use of ioperm() by "
83783+#define GR_IOPL_MSG "denied use of iopl() by "
83784+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
83785+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
83786+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
83787+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
83788+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
83789+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
83790+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
83791+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
83792+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
83793+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
83794+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
83795+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
83796+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
83797+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
83798+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
83799+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
83800+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
83801+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
83802+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
83803+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
83804+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
83805+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
83806+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
83807+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
83808+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
83809+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
83810+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
83811+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
83812+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
83813+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
83814+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
83815+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
83816+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
83817+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
83818+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
83819+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
83820+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
83821+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
83822+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
83823+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
83824+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
83825+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
83826+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
83827+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
83828+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
83829+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
83830+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
83831+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
83832+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
83833+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
83834+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
83835+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
83836+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
83837+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
83838+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
83839+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
83840+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
83841+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
83842+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
83843+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
83844+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
83845+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
83846+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
83847+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
83848+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
83849+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
83850+#define GR_FAILFORK_MSG "failed fork with errno %s by "
83851+#define GR_NICE_CHROOT_MSG "denied priority change by "
83852+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
83853+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
83854+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
83855+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
83856+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
83857+#define GR_TIME_MSG "time set by "
83858+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
83859+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
83860+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
83861+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
83862+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
83863+#define GR_BIND_MSG "denied bind() by "
83864+#define GR_CONNECT_MSG "denied connect() by "
83865+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
83866+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
83867+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
83868+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
83869+#define GR_CAP_ACL_MSG "use of %s denied for "
83870+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
83871+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
83872+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
83873+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
83874+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
83875+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
83876+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
83877+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
83878+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
83879+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
83880+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
83881+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
83882+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
83883+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
83884+#define GR_VM86_MSG "denied use of vm86 by "
83885+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
83886+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
83887+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
83888+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
83889+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
83890+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
83891+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
83892+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
83893+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
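
Many of these format strings deliberately end in "by " because the grsecurity logging layer appends the acting task's identity (and, for most events, its parent's) after the event-specific prefix. A minimal userspace sketch of that two-part assembly follows; gr_log_demo() is a hypothetical stand-in for the in-kernel logger, not code from this patch:

#include <stdarg.h>
#include <stdio.h>

#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "	/* same define as above */

/* hypothetical stand-in for the in-kernel grsecurity logger */
static void gr_log_demo(const char *fmt, ...)
{
	char buf[1024];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);	/* format the event-specific prefix */
	va_end(ap);
	/* the real logger appends comm/pid and credential details here */
	printf("grsec: %s%s[%d]\n", buf, "bash", 1234);
}

int main(void)
{
	gr_log_demo(GR_CHDIR_AUDIT_MSG, "/etc");	/* "grsec: chdir to /etc by bash[1234]" */
	return 0;
}
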
83894diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
83895new file mode 100644
83896index 0000000..63c1850
83897--- /dev/null
83898+++ b/include/linux/grsecurity.h
83899@@ -0,0 +1,250 @@
83900+#ifndef GR_SECURITY_H
83901+#define GR_SECURITY_H
83902+#include <linux/fs.h>
83903+#include <linux/fs_struct.h>
83904+#include <linux/binfmts.h>
83905+#include <linux/gracl.h>
83906+
83907+/* notify of brain-dead configs */
83908+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83909+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
83910+#endif
83911+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83912+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
83913+#endif
83914+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
83915+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
83916+#endif
83917+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
83918+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
83919+#endif
83920+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
83921+#error "CONFIG_PAX enabled, but no PaX options are enabled."
83922+#endif
83923+
83924+int gr_handle_new_usb(void);
83925+
83926+void gr_handle_brute_attach(int dumpable);
83927+void gr_handle_brute_check(void);
83928+void gr_handle_kernel_exploit(void);
83929+
83930+char gr_roletype_to_char(void);
83931+
83932+int gr_proc_is_restricted(void);
83933+
83934+int gr_acl_enable_at_secure(void);
83935+
83936+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
83937+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
83938+
83939+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
83940+
83941+void gr_del_task_from_ip_table(struct task_struct *p);
83942+
83943+int gr_pid_is_chrooted(struct task_struct *p);
83944+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
83945+int gr_handle_chroot_nice(void);
83946+int gr_handle_chroot_sysctl(const int op);
83947+int gr_handle_chroot_setpriority(struct task_struct *p,
83948+ const int niceval);
83949+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
83950+int gr_chroot_fhandle(void);
83951+int gr_handle_chroot_chroot(const struct dentry *dentry,
83952+ const struct vfsmount *mnt);
83953+void gr_handle_chroot_chdir(const struct path *path);
83954+int gr_handle_chroot_chmod(const struct dentry *dentry,
83955+ const struct vfsmount *mnt, const int mode);
83956+int gr_handle_chroot_mknod(const struct dentry *dentry,
83957+ const struct vfsmount *mnt, const int mode);
83958+int gr_handle_chroot_mount(const struct dentry *dentry,
83959+ const struct vfsmount *mnt,
83960+ const char *dev_name);
83961+int gr_handle_chroot_pivot(void);
83962+int gr_handle_chroot_unix(const pid_t pid);
83963+
83964+int gr_handle_rawio(const struct inode *inode);
83965+
83966+void gr_handle_ioperm(void);
83967+void gr_handle_iopl(void);
83968+void gr_handle_msr_write(void);
83969+
83970+umode_t gr_acl_umask(void);
83971+
83972+int gr_tpe_allow(const struct file *file);
83973+
83974+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
83975+void gr_clear_chroot_entries(struct task_struct *task);
83976+
83977+void gr_log_forkfail(const int retval);
83978+void gr_log_timechange(void);
83979+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
83980+void gr_log_chdir(const struct dentry *dentry,
83981+ const struct vfsmount *mnt);
83982+void gr_log_chroot_exec(const struct dentry *dentry,
83983+ const struct vfsmount *mnt);
83984+void gr_log_remount(const char *devname, const int retval);
83985+void gr_log_unmount(const char *devname, const int retval);
83986+void gr_log_mount(const char *from, struct path *to, const int retval);
83987+void gr_log_textrel(struct vm_area_struct *vma);
83988+void gr_log_ptgnustack(struct file *file);
83989+void gr_log_rwxmmap(struct file *file);
83990+void gr_log_rwxmprotect(struct vm_area_struct *vma);
83991+
83992+int gr_handle_follow_link(const struct inode *parent,
83993+ const struct inode *inode,
83994+ const struct dentry *dentry,
83995+ const struct vfsmount *mnt);
83996+int gr_handle_fifo(const struct dentry *dentry,
83997+ const struct vfsmount *mnt,
83998+ const struct dentry *dir, const int flag,
83999+ const int acc_mode);
84000+int gr_handle_hardlink(const struct dentry *dentry,
84001+ const struct vfsmount *mnt,
84002+ struct inode *inode,
84003+ const int mode, const struct filename *to);
84004+
84005+int gr_is_capable(const int cap);
84006+int gr_is_capable_nolog(const int cap);
84007+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
84008+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
84009+
84010+void gr_copy_label(struct task_struct *tsk);
84011+void gr_handle_crash(struct task_struct *task, const int sig);
84012+int gr_handle_signal(const struct task_struct *p, const int sig);
84013+int gr_check_crash_uid(const kuid_t uid);
84014+int gr_check_protected_task(const struct task_struct *task);
84015+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
84016+int gr_acl_handle_mmap(const struct file *file,
84017+ const unsigned long prot);
84018+int gr_acl_handle_mprotect(const struct file *file,
84019+ const unsigned long prot);
84020+int gr_check_hidden_task(const struct task_struct *tsk);
84021+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
84022+ const struct vfsmount *mnt);
84023+__u32 gr_acl_handle_utime(const struct dentry *dentry,
84024+ const struct vfsmount *mnt);
84025+__u32 gr_acl_handle_access(const struct dentry *dentry,
84026+ const struct vfsmount *mnt, const int fmode);
84027+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
84028+ const struct vfsmount *mnt, umode_t *mode);
84029+__u32 gr_acl_handle_chown(const struct dentry *dentry,
84030+ const struct vfsmount *mnt);
84031+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
84032+ const struct vfsmount *mnt);
84033+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
84034+ const struct vfsmount *mnt);
84035+int gr_handle_ptrace(struct task_struct *task, const long request);
84036+int gr_handle_proc_ptrace(struct task_struct *task);
84037+__u32 gr_acl_handle_execve(const struct dentry *dentry,
84038+ const struct vfsmount *mnt);
84039+int gr_check_crash_exec(const struct file *filp);
84040+int gr_acl_is_enabled(void);
84041+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
84042+ const kgid_t gid);
84043+int gr_set_proc_label(const struct dentry *dentry,
84044+ const struct vfsmount *mnt,
84045+ const int unsafe_flags);
84046+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
84047+ const struct vfsmount *mnt);
84048+__u32 gr_acl_handle_open(const struct dentry *dentry,
84049+ const struct vfsmount *mnt, int acc_mode);
84050+__u32 gr_acl_handle_creat(const struct dentry *dentry,
84051+ const struct dentry *p_dentry,
84052+ const struct vfsmount *p_mnt,
84053+ int open_flags, int acc_mode, const int imode);
84054+void gr_handle_create(const struct dentry *dentry,
84055+ const struct vfsmount *mnt);
84056+void gr_handle_proc_create(const struct dentry *dentry,
84057+ const struct inode *inode);
84058+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
84059+ const struct dentry *parent_dentry,
84060+ const struct vfsmount *parent_mnt,
84061+ const int mode);
84062+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
84063+ const struct dentry *parent_dentry,
84064+ const struct vfsmount *parent_mnt);
84065+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
84066+ const struct vfsmount *mnt);
84067+void gr_handle_delete(const u64 ino, const dev_t dev);
84068+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
84069+ const struct vfsmount *mnt);
84070+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
84071+ const struct dentry *parent_dentry,
84072+ const struct vfsmount *parent_mnt,
84073+ const struct filename *from);
84074+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
84075+ const struct dentry *parent_dentry,
84076+ const struct vfsmount *parent_mnt,
84077+ const struct dentry *old_dentry,
84078+ const struct vfsmount *old_mnt, const struct filename *to);
84079+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
84080+int gr_acl_handle_rename(struct dentry *new_dentry,
84081+ struct dentry *parent_dentry,
84082+ const struct vfsmount *parent_mnt,
84083+ struct dentry *old_dentry,
84084+ struct inode *old_parent_inode,
84085+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
84086+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
84087+ struct dentry *old_dentry,
84088+ struct dentry *new_dentry,
84089+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
84090+__u32 gr_check_link(const struct dentry *new_dentry,
84091+ const struct dentry *parent_dentry,
84092+ const struct vfsmount *parent_mnt,
84093+ const struct dentry *old_dentry,
84094+ const struct vfsmount *old_mnt);
84095+int gr_acl_handle_filldir(const struct file *file, const char *name,
84096+ const unsigned int namelen, const u64 ino);
84097+
84098+__u32 gr_acl_handle_unix(const struct dentry *dentry,
84099+ const struct vfsmount *mnt);
84100+void gr_acl_handle_exit(void);
84101+void gr_acl_handle_psacct(struct task_struct *task, const long code);
84102+int gr_acl_handle_procpidmem(const struct task_struct *task);
84103+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
84104+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
84105+void gr_audit_ptrace(struct task_struct *task);
84106+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
84107+u64 gr_get_ino_from_dentry(struct dentry *dentry);
84108+void gr_put_exec_file(struct task_struct *task);
84109+
84110+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
84111+
84112+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
84113+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
84114+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
84115+ struct dentry *newdentry, struct vfsmount *newmnt);
84116+
84117+#ifdef CONFIG_GRKERNSEC_RESLOG
84118+extern void gr_log_resource(const struct task_struct *task, const int res,
84119+ const unsigned long wanted, const int gt);
84120+#else
84121+static inline void gr_log_resource(const struct task_struct *task, const int res,
84122+ const unsigned long wanted, const int gt)
84123+{
84124+}
84125+#endif
84126+
84127+#ifdef CONFIG_GRKERNSEC
84128+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
84129+void gr_handle_vm86(void);
84130+void gr_handle_mem_readwrite(u64 from, u64 to);
84131+
84132+void gr_log_badprocpid(const char *entry);
84133+
84134+extern int grsec_enable_dmesg;
84135+extern int grsec_disable_privio;
84136+
84137+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84138+extern kgid_t grsec_proc_gid;
84139+#endif
84140+
84141+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84142+extern int grsec_enable_chroot_findtask;
84143+#endif
84144+#ifdef CONFIG_GRKERNSEC_SETXID
84145+extern int grsec_enable_setxid;
84146+#endif
84147+#endif
84148+
84149+#endif
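
Note the pattern used for gr_log_resource() near the end of this header: when CONFIG_GRKERNSEC_RESLOG is unset, the extern declaration is replaced by an empty static inline, so every call site compiles away to nothing and needs no #ifdef guard of its own. A standalone sketch of the same pattern, with CONFIG_DEMO_FEATURE as a stand-in config symbol:

#include <stdio.h>

#ifdef CONFIG_DEMO_FEATURE
void demo_log(int res, unsigned long wanted);	/* real definition elsewhere */
#else
static inline void demo_log(int res, unsigned long wanted) { }	/* optimized away */
#endif

int main(void)
{
	demo_log(7, 4096UL);	/* the caller is identical either way */
	printf("done\n");
	return 0;
}
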
84150diff --git a/include/linux/grsock.h b/include/linux/grsock.h
84151new file mode 100644
84152index 0000000..e7ffaaf
84153--- /dev/null
84154+++ b/include/linux/grsock.h
84155@@ -0,0 +1,19 @@
84156+#ifndef __GRSOCK_H
84157+#define __GRSOCK_H
84158+
84159+extern void gr_attach_curr_ip(const struct sock *sk);
84160+extern int gr_handle_sock_all(const int family, const int type,
84161+ const int protocol);
84162+extern int gr_handle_sock_server(const struct sockaddr *sck);
84163+extern int gr_handle_sock_server_other(const struct sock *sck);
84164+extern int gr_handle_sock_client(const struct sockaddr *sck);
84165+extern int gr_search_connect(struct socket * sock,
84166+ struct sockaddr_in * addr);
84167+extern int gr_search_bind(struct socket * sock,
84168+ struct sockaddr_in * addr);
84169+extern int gr_search_listen(struct socket * sock);
84170+extern int gr_search_accept(struct socket * sock);
84171+extern int gr_search_socket(const int domain, const int type,
84172+ const int protocol);
84173+
84174+#endif
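
These hooks give the RBAC engine veto points in the generic socket paths; the actual call sites live in the net/ hunks of this patch. A hedged sketch of the intended shape of such a call site, with gr_search_socket() stubbed so the fragment stands alone:

#include <errno.h>

/* stub for the extern above; the real one consults RBAC subject policy */
static int gr_search_socket(int domain, int type, int protocol)
{
	return 1;	/* nonzero = allowed */
}

static int demo_sys_socket(int family, int type, int protocol)
{
	if (!gr_search_socket(family, type, protocol))
		return -EACCES;	/* denied before any socket is allocated */
	/* ... continue into normal socket creation ... */
	return 0;
}

int main(void)
{
	return demo_sys_socket(2 /* AF_INET */, 1 /* SOCK_STREAM */, 0);
}
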
84175diff --git a/include/linux/highmem.h b/include/linux/highmem.h
84176index 9286a46..373f27f 100644
84177--- a/include/linux/highmem.h
84178+++ b/include/linux/highmem.h
84179@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
84180 kunmap_atomic(kaddr);
84181 }
84182
84183+static inline void sanitize_highpage(struct page *page)
84184+{
84185+ void *kaddr;
84186+ unsigned long flags;
84187+
84188+ local_irq_save(flags);
84189+ kaddr = kmap_atomic(page);
84190+ clear_page(kaddr);
84191+ kunmap_atomic(kaddr);
84192+ local_irq_restore(flags);
84193+}
84194+
84195 static inline void zero_user_segments(struct page *page,
84196 unsigned start1, unsigned end1,
84197 unsigned start2, unsigned end2)
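
sanitize_highpage() zeroes a page through a short-lived atomic mapping, with interrupts disabled for the duration of the clear. It is the scrubbing primitive for PaX's memory sanitization; a hedged, illustrative call-site shape (the real hooks sit in the page allocator hunks elsewhere in this patch):

/* illustrative only: shape of a free-path hook using sanitize_highpage() */
static void demo_free_page_scrub(struct page *page)
{
	sanitize_highpage(page);	/* page contents are gone before reuse */
	/* ... return the page to the allocator as usual ... */
}
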
84198diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
84199index 1c7b89a..7dda400 100644
84200--- a/include/linux/hwmon-sysfs.h
84201+++ b/include/linux/hwmon-sysfs.h
84202@@ -25,7 +25,8 @@
84203 struct sensor_device_attribute{
84204 struct device_attribute dev_attr;
84205 int index;
84206-};
84207+} __do_const;
84208+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
84209 #define to_sensor_dev_attr(_dev_attr) \
84210 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
84211
84212@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
84213 struct device_attribute dev_attr;
84214 u8 index;
84215 u8 nr;
84216-};
84217+} __do_const;
84218+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
84219 #define to_sensor_dev_attr_2(_dev_attr) \
84220 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
84221
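
This is the recurring constification pattern in this patch set: __do_const tells the constify gcc plugin to treat every instance of the structure as const, so statically initialized attribute tables land in read-only memory, while the *_no_const typedef is the escape hatch for the few drivers that must fill an attribute in at runtime. A hypothetical driver fragment using the escape hatch:

/* hypothetical driver code: a runtime-built attribute uses the _no_const
 * variant, so this one instance stays writable while ordinary statically
 * initialized sensor attributes become read-only */
static sensor_device_attribute_no_const demo_temp_attr;

static void demo_init_attr(int channel)
{
	demo_temp_attr.index = channel;	/* legal only on the no_const type */
}
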
84222diff --git a/include/linux/i2c.h b/include/linux/i2c.h
84223index f17da50..2f8b203 100644
84224--- a/include/linux/i2c.h
84225+++ b/include/linux/i2c.h
84226@@ -409,6 +409,7 @@ struct i2c_algorithm {
84227 int (*unreg_slave)(struct i2c_client *client);
84228 #endif
84229 };
84230+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
84231
84232 /**
84233 * struct i2c_bus_recovery_info - I2C bus recovery information
84234diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
84235index aff7ad8..3942bbd 100644
84236--- a/include/linux/if_pppox.h
84237+++ b/include/linux/if_pppox.h
84238@@ -76,7 +76,7 @@ struct pppox_proto {
84239 int (*ioctl)(struct socket *sock, unsigned int cmd,
84240 unsigned long arg);
84241 struct module *owner;
84242-};
84243+} __do_const;
84244
84245 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
84246 extern void unregister_pppox_proto(int proto_num);
84247diff --git a/include/linux/init.h b/include/linux/init.h
84248index 2df8e8d..3e1280d 100644
84249--- a/include/linux/init.h
84250+++ b/include/linux/init.h
84251@@ -37,9 +37,17 @@
84252 * section.
84253 */
84254
84255+#define add_init_latent_entropy __latent_entropy
84256+
84257+#ifdef CONFIG_MEMORY_HOTPLUG
84258+#define add_meminit_latent_entropy
84259+#else
84260+#define add_meminit_latent_entropy __latent_entropy
84261+#endif
84262+
84263 /* These are for everybody (although not all archs will actually
84264 discard it in modules) */
84265-#define __init __section(.init.text) __cold notrace
84266+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
84267 #define __initdata __section(.init.data)
84268 #define __initconst __constsection(.init.rodata)
84269 #define __exitdata __section(.exit.data)
84270@@ -100,7 +108,7 @@
84271 #define __cpuexitconst
84272
84273 /* Used for MEMORY_HOTPLUG */
84274-#define __meminit __section(.meminit.text) __cold notrace
84275+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
84276 #define __meminitdata __section(.meminit.data)
84277 #define __meminitconst __constsection(.meminit.rodata)
84278 #define __memexit __section(.memexit.text) __exitused __cold notrace
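
The conditional marker encodes a simple argument: .meminit.text is only guaranteed to run once, at boot, when memory hotplug is disabled; with CONFIG_MEMORY_HOTPLUG it can run again later, so presumably only the hotplug-off build is safe and useful to instrument for boot-time latent entropy. For reference, the effective expansions (the attribute is consumed by the latent_entropy gcc plugin; without the plugin it expands to nothing):

/* CONFIG_MEMORY_HOTPLUG unset: both sections are boot-only, both instrumented
 *   __init    => __section(.init.text)    __cold notrace __latent_entropy
 *   __meminit => __section(.meminit.text) __cold notrace __latent_entropy
 *
 * CONFIG_MEMORY_HOTPLUG set: meminit code may rerun after boot
 *   __meminit => __section(.meminit.text) __cold notrace        (no marker)
 */
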
84279diff --git a/include/linux/init_task.h b/include/linux/init_task.h
84280index 696d223..6d6b39f 100644
84281--- a/include/linux/init_task.h
84282+++ b/include/linux/init_task.h
84283@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
84284
84285 #define INIT_TASK_COMM "swapper"
84286
84287+#ifdef CONFIG_X86
84288+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
84289+#else
84290+#define INIT_TASK_THREAD_INFO
84291+#endif
84292+
84293 #ifdef CONFIG_RT_MUTEXES
84294 # define INIT_RT_MUTEXES(tsk) \
84295 .pi_waiters = RB_ROOT, \
84296@@ -224,6 +230,7 @@ extern struct task_group root_task_group;
84297 RCU_POINTER_INITIALIZER(cred, &init_cred), \
84298 .comm = INIT_TASK_COMM, \
84299 .thread = INIT_THREAD, \
84300+ INIT_TASK_THREAD_INFO \
84301 .fs = &init_fs, \
84302 .files = &init_files, \
84303 .signal = &init_signals, \
84304diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
84305index 2e88580..f6a99a0 100644
84306--- a/include/linux/interrupt.h
84307+++ b/include/linux/interrupt.h
84308@@ -420,8 +420,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
84309
84310 struct softirq_action
84311 {
84312- void (*action)(struct softirq_action *);
84313-};
84314+ void (*action)(void);
84315+} __no_const;
84316
84317 asmlinkage void do_softirq(void);
84318 asmlinkage void __do_softirq(void);
84319@@ -435,7 +435,7 @@ static inline void do_softirq_own_stack(void)
84320 }
84321 #endif
84322
84323-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
84324+extern void open_softirq(int nr, void (*action)(void));
84325 extern void softirq_init(void);
84326 extern void __raise_softirq_irqoff(unsigned int nr);
84327
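
Handlers never made use of their struct softirq_action argument, so the hardened kernel drops it; the __no_const annotation keeps the softirq vector writable (it is filled in at runtime by open_softirq()) even under constification. A hypothetical handler under the new prototype:

/* hypothetical handler: note the empty parameter list after this change */
static void demo_softirq_action(void)
{
	/* drain whatever work this softirq is responsible for */
}

static void demo_register_softirq(void)
{
	open_softirq(TASKLET_SOFTIRQ, demo_softirq_action);	/* matches the new prototype */
}
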
84328diff --git a/include/linux/iommu.h b/include/linux/iommu.h
84329index 38daa45..4de4317 100644
84330--- a/include/linux/iommu.h
84331+++ b/include/linux/iommu.h
84332@@ -147,7 +147,7 @@ struct iommu_ops {
84333
84334 unsigned long pgsize_bitmap;
84335 void *priv;
84336-};
84337+} __do_const;
84338
84339 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
84340 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
84341diff --git a/include/linux/ioport.h b/include/linux/ioport.h
84342index 2c525022..345b106 100644
84343--- a/include/linux/ioport.h
84344+++ b/include/linux/ioport.h
84345@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
84346 int adjust_resource(struct resource *res, resource_size_t start,
84347 resource_size_t size);
84348 resource_size_t resource_alignment(struct resource *res);
84349-static inline resource_size_t resource_size(const struct resource *res)
84350+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
84351 {
84352 return res->end - res->start + 1;
84353 }
84354diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
84355index 1eee6bc..9cf4912 100644
84356--- a/include/linux/ipc_namespace.h
84357+++ b/include/linux/ipc_namespace.h
84358@@ -60,7 +60,7 @@ struct ipc_namespace {
84359 struct user_namespace *user_ns;
84360
84361 struct ns_common ns;
84362-};
84363+} __randomize_layout;
84364
84365 extern struct ipc_namespace init_ipc_ns;
84366 extern atomic_t nr_ipc_ns;
84367diff --git a/include/linux/irq.h b/include/linux/irq.h
84368index d09ec7a..f373eb5 100644
84369--- a/include/linux/irq.h
84370+++ b/include/linux/irq.h
84371@@ -364,7 +364,8 @@ struct irq_chip {
84372 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
84373
84374 unsigned long flags;
84375-};
84376+} __do_const;
84377+typedef struct irq_chip __no_const irq_chip_no_const;
84378
84379 /*
84380 * irq_chip specific flags
84381diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
84382index 71d706d..817cdec 100644
84383--- a/include/linux/irqchip/arm-gic.h
84384+++ b/include/linux/irqchip/arm-gic.h
84385@@ -95,7 +95,7 @@
84386
84387 struct device_node;
84388
84389-extern struct irq_chip gic_arch_extn;
84390+extern irq_chip_no_const gic_arch_extn;
84391
84392 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
84393 u32 offset, struct device_node *);
84394diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
84395index dd1109f..4f4fdda 100644
84396--- a/include/linux/irqdesc.h
84397+++ b/include/linux/irqdesc.h
84398@@ -61,7 +61,7 @@ struct irq_desc {
84399 unsigned int irq_count; /* For detecting broken IRQs */
84400 unsigned long last_unhandled; /* Aging timer for unhandled count */
84401 unsigned int irqs_unhandled;
84402- atomic_t threads_handled;
84403+ atomic_unchecked_t threads_handled;
84404 int threads_handled_last;
84405 raw_spinlock_t lock;
84406 struct cpumask *percpu_enabled;
84407diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
84408index 676d730..5e05daec 100644
84409--- a/include/linux/irqdomain.h
84410+++ b/include/linux/irqdomain.h
84411@@ -40,6 +40,7 @@ struct device_node;
84412 struct irq_domain;
84413 struct of_device_id;
84414 struct irq_chip;
84415+struct irq_chip_no_const;
84416 struct irq_data;
84417
84418 /* Number of irqs reserved for a legacy isa controller */
84419diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
84420index c367cbd..c9b79e6 100644
84421--- a/include/linux/jiffies.h
84422+++ b/include/linux/jiffies.h
84423@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
84424 /*
84425 * Convert various time units to each other:
84426 */
84427-extern unsigned int jiffies_to_msecs(const unsigned long j);
84428-extern unsigned int jiffies_to_usecs(const unsigned long j);
84429+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
84430+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
84431
84432-static inline u64 jiffies_to_nsecs(const unsigned long j)
84433+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
84434 {
84435 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
84436 }
84437
84438-extern unsigned long msecs_to_jiffies(const unsigned int m);
84439-extern unsigned long usecs_to_jiffies(const unsigned int u);
84440+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
84441+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
84442 extern unsigned long timespec_to_jiffies(const struct timespec *value);
84443 extern void jiffies_to_timespec(const unsigned long jiffies,
84444- struct timespec *value);
84445-extern unsigned long timeval_to_jiffies(const struct timeval *value);
84446+ struct timespec *value) __intentional_overflow(-1);
84447+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
84448 extern void jiffies_to_timeval(const unsigned long jiffies,
84449 struct timeval *value);
84450
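
__intentional_overflow(-1) is an annotation for the size_overflow gcc plugin used by this patch set: arithmetic inside a marked function is permitted to wrap without triggering the plugin's overflow report, which suits these time conversions since they saturate or wrap by design. On a compiler without the plugin the marker should collapse to nothing; a minimal compatibility stub might look like:

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* no size_overflow plugin: no-op */
#endif
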
84451diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
84452index 6883e19..e854fcb 100644
84453--- a/include/linux/kallsyms.h
84454+++ b/include/linux/kallsyms.h
84455@@ -15,7 +15,8 @@
84456
84457 struct module;
84458
84459-#ifdef CONFIG_KALLSYMS
84460+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
84461+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
84462 /* Lookup the address for a symbol. Returns 0 if not found. */
84463 unsigned long kallsyms_lookup_name(const char *name);
84464
84465@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
84466 /* Stupid that this does nothing, but I didn't create this mess. */
84467 #define __print_symbol(fmt, addr)
84468 #endif /*CONFIG_KALLSYMS*/
84469+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
84470+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
84471+extern unsigned long kallsyms_lookup_name(const char *name);
84472+extern void __print_symbol(const char *fmt, unsigned long address);
84473+extern int sprint_backtrace(char *buffer, unsigned long address);
84474+extern int sprint_symbol(char *buffer, unsigned long address);
84475+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
84476+const char *kallsyms_lookup(unsigned long addr,
84477+ unsigned long *symbolsize,
84478+ unsigned long *offset,
84479+ char **modname, char *namebuf);
84480+extern int kallsyms_lookup_size_offset(unsigned long addr,
84481+ unsigned long *symbolsize,
84482+ unsigned long *offset);
84483+#endif
84484
84485 /* This macro allows us to keep printk typechecking */
84486 static __printf(1, 2)
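
The two-level #if above implements the GRKERNSEC_HIDESYM split: ordinary code sees the stubbed kallsyms API, so kernel addresses cannot leak through symbol lookups, and only the files named in the comment opt back in to the real externs. The opt-in, sketched:

/* in kallsyms.c / kprobes.c / vsnprintf.c / dumpstack.c style code: */
#define __INCLUDED_BY_HIDESYM 1
#include <linux/kallsyms.h>	/* now yields the real lookup prototypes */
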
84487diff --git a/include/linux/kernel.h b/include/linux/kernel.h
84488index d6d630d..feea1f5 100644
84489--- a/include/linux/kernel.h
84490+++ b/include/linux/kernel.h
84491@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
84492 /* Obsolete, do not use. Use kstrto<foo> instead */
84493
84494 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
84495-extern long simple_strtol(const char *,char **,unsigned int);
84496+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
84497 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
84498 extern long long simple_strtoll(const char *,char **,unsigned int);
84499
84500diff --git a/include/linux/key-type.h b/include/linux/key-type.h
84501index ff9f1d3..6712be5 100644
84502--- a/include/linux/key-type.h
84503+++ b/include/linux/key-type.h
84504@@ -152,7 +152,7 @@ struct key_type {
84505 /* internal fields */
84506 struct list_head link; /* link in types list */
84507 struct lock_class_key lock_class; /* key->sem lock class */
84508-};
84509+} __do_const;
84510
84511 extern struct key_type key_type_keyring;
84512
84513diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
84514index e465bb1..19f605fd 100644
84515--- a/include/linux/kgdb.h
84516+++ b/include/linux/kgdb.h
84517@@ -52,7 +52,7 @@ extern int kgdb_connected;
84518 extern int kgdb_io_module_registered;
84519
84520 extern atomic_t kgdb_setting_breakpoint;
84521-extern atomic_t kgdb_cpu_doing_single_step;
84522+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
84523
84524 extern struct task_struct *kgdb_usethread;
84525 extern struct task_struct *kgdb_contthread;
84526@@ -254,7 +254,7 @@ struct kgdb_arch {
84527 void (*correct_hw_break)(void);
84528
84529 void (*enable_nmi)(bool on);
84530-};
84531+} __do_const;
84532
84533 /**
84534 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
84535@@ -279,7 +279,7 @@ struct kgdb_io {
84536 void (*pre_exception) (void);
84537 void (*post_exception) (void);
84538 int is_console;
84539-};
84540+} __do_const;
84541
84542 extern struct kgdb_arch arch_kgdb_ops;
84543
84544diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
84545index e705467..a92471d 100644
84546--- a/include/linux/kmemleak.h
84547+++ b/include/linux/kmemleak.h
84548@@ -27,7 +27,7 @@
84549
84550 extern void kmemleak_init(void) __ref;
84551 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
84552- gfp_t gfp) __ref;
84553+ gfp_t gfp) __ref __size_overflow(2);
84554 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
84555 extern void kmemleak_free(const void *ptr) __ref;
84556 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
84557@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
84558 static inline void kmemleak_init(void)
84559 {
84560 }
84561-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
84562+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
84563 gfp_t gfp)
84564 {
84565 }
84566diff --git a/include/linux/kmod.h b/include/linux/kmod.h
84567index 0555cc6..40116ce 100644
84568--- a/include/linux/kmod.h
84569+++ b/include/linux/kmod.h
84570@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
84571 * usually useless though. */
84572 extern __printf(2, 3)
84573 int __request_module(bool wait, const char *name, ...);
84574+extern __printf(3, 4)
84575+int ___request_module(bool wait, char *param_name, const char *name, ...);
84576 #define request_module(mod...) __request_module(true, mod)
84577 #define request_module_nowait(mod...) __request_module(false, mod)
84578 #define try_then_request_module(x, mod...) \
84579@@ -57,6 +59,9 @@ struct subprocess_info {
84580 struct work_struct work;
84581 struct completion *complete;
84582 char *path;
84583+#ifdef CONFIG_GRKERNSEC
84584+ char *origpath;
84585+#endif
84586 char **argv;
84587 char **envp;
84588 int wait;
84589diff --git a/include/linux/kobject.h b/include/linux/kobject.h
84590index 2d61b90..a1d0a13 100644
84591--- a/include/linux/kobject.h
84592+++ b/include/linux/kobject.h
84593@@ -118,7 +118,7 @@ struct kobj_type {
84594 struct attribute **default_attrs;
84595 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
84596 const void *(*namespace)(struct kobject *kobj);
84597-};
84598+} __do_const;
84599
84600 struct kobj_uevent_env {
84601 char *argv[3];
84602@@ -142,6 +142,7 @@ struct kobj_attribute {
84603 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
84604 const char *buf, size_t count);
84605 };
84606+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
84607
84608 extern const struct sysfs_ops kobj_sysfs_ops;
84609
84610@@ -169,7 +170,7 @@ struct kset {
84611 spinlock_t list_lock;
84612 struct kobject kobj;
84613 const struct kset_uevent_ops *uevent_ops;
84614-};
84615+} __randomize_layout;
84616
84617 extern void kset_init(struct kset *kset);
84618 extern int __must_check kset_register(struct kset *kset);
84619diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
84620index df32d25..fb52e27 100644
84621--- a/include/linux/kobject_ns.h
84622+++ b/include/linux/kobject_ns.h
84623@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
84624 const void *(*netlink_ns)(struct sock *sk);
84625 const void *(*initial_ns)(void);
84626 void (*drop_ns)(void *);
84627-};
84628+} __do_const;
84629
84630 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
84631 int kobj_ns_type_registered(enum kobj_ns_type type);
84632diff --git a/include/linux/kref.h b/include/linux/kref.h
84633index 484604d..0f6c5b6 100644
84634--- a/include/linux/kref.h
84635+++ b/include/linux/kref.h
84636@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
84637 static inline int kref_sub(struct kref *kref, unsigned int count,
84638 void (*release)(struct kref *kref))
84639 {
84640- WARN_ON(release == NULL);
84641+ BUG_ON(release == NULL);
84642
84643 if (atomic_sub_and_test((int) count, &kref->refcount)) {
84644 release(kref);
84645diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
84646index d12b210..d91fd76 100644
84647--- a/include/linux/kvm_host.h
84648+++ b/include/linux/kvm_host.h
84649@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void)
84650 {
84651 }
84652 #endif
84653-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84654+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
84655 struct module *module);
84656 void kvm_exit(void);
84657
84658@@ -633,7 +633,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
84659 struct kvm_guest_debug *dbg);
84660 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
84661
84662-int kvm_arch_init(void *opaque);
84663+int kvm_arch_init(const void *opaque);
84664 void kvm_arch_exit(void);
84665
84666 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
84667diff --git a/include/linux/libata.h b/include/linux/libata.h
84668index 6b08cc1..248c5e9 100644
84669--- a/include/linux/libata.h
84670+++ b/include/linux/libata.h
84671@@ -980,7 +980,7 @@ struct ata_port_operations {
84672 * fields must be pointers.
84673 */
84674 const struct ata_port_operations *inherits;
84675-};
84676+} __do_const;
84677
84678 struct ata_port_info {
84679 unsigned long flags;
84680diff --git a/include/linux/linkage.h b/include/linux/linkage.h
84681index a6a42dd..6c5ebce 100644
84682--- a/include/linux/linkage.h
84683+++ b/include/linux/linkage.h
84684@@ -36,6 +36,7 @@
84685 #endif
84686
84687 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
84688+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
84689 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
84690
84691 /*
84692diff --git a/include/linux/list.h b/include/linux/list.h
84693index feb773c..98f3075 100644
84694--- a/include/linux/list.h
84695+++ b/include/linux/list.h
84696@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
84697 extern void list_del(struct list_head *entry);
84698 #endif
84699
84700+extern void __pax_list_add(struct list_head *new,
84701+ struct list_head *prev,
84702+ struct list_head *next);
84703+static inline void pax_list_add(struct list_head *new, struct list_head *head)
84704+{
84705+ __pax_list_add(new, head, head->next);
84706+}
84707+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
84708+{
84709+ __pax_list_add(new, head->prev, head);
84710+}
84711+extern void pax_list_del(struct list_head *entry);
84712+
84713 /**
84714 * list_replace - replace old entry by new one
84715 * @old : the element to be replaced
84716@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
84717 INIT_LIST_HEAD(entry);
84718 }
84719
84720+extern void pax_list_del_init(struct list_head *entry);
84721+
84722 /**
84723 * list_move - delete from one list and add as another's head
84724 * @list: the entry to move
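
pax_list_add()/pax_list_del() mirror list_add()/list_del() for lists whose nodes may sit in read-only memory after constification; the definitions live elsewhere in this patch. A hedged sketch of their likely shape: the ordinary __list_add() splice, bracketed by the PaX kernel-write window:

/* sketch only; the real __pax_list_add() is defined elsewhere in this patch */
void __pax_list_add(struct list_head *new, struct list_head *prev,
		    struct list_head *next)
{
	pax_open_kernel();	/* temporarily allow writes to read-only data */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
	pax_close_kernel();
}
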
84725diff --git a/include/linux/lockref.h b/include/linux/lockref.h
84726index b10b122..d37b3de 100644
84727--- a/include/linux/lockref.h
84728+++ b/include/linux/lockref.h
84729@@ -28,7 +28,7 @@ struct lockref {
84730 #endif
84731 struct {
84732 spinlock_t lock;
84733- int count;
84734+ atomic_t count;
84735 };
84736 };
84737 };
84738@@ -43,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *);
84739 extern int lockref_get_not_dead(struct lockref *);
84740
84741 /* Must be called under spinlock for reliable results */
84742-static inline int __lockref_is_dead(const struct lockref *l)
84743+static inline int __lockref_is_dead(const struct lockref *lockref)
84744 {
84745- return ((int)l->count < 0);
84746+ return atomic_read(&lockref->count) < 0;
84747+}
84748+
84749+static inline int __lockref_read(const struct lockref *lockref)
84750+{
84751+ return atomic_read(&lockref->count);
84752+}
84753+
84754+static inline void __lockref_set(struct lockref *lockref, int count)
84755+{
84756+ atomic_set(&lockref->count, count);
84757+}
84758+
84759+static inline void __lockref_inc(struct lockref *lockref)
84760+{
84761+ atomic_inc(&lockref->count);
84762+}
84763+
84764+static inline void __lockref_dec(struct lockref *lockref)
84765+{
84766+ atomic_dec(&lockref->count);
84767 }
84768
84769 #endif /* __LINUX_LOCKREF_H */
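
With count now an atomic_t, callers go through the new accessors instead of touching the field directly, which centralizes the type change and lets PaX's refcount protection trap on overflow of the underlying atomic. An illustrative caller conversion, dcache-style:

/* illustrative caller: field access becomes an accessor call */
static void demo_lockref_get_locked(struct lockref *ref)
{
	spin_lock(&ref->lock);
	__lockref_inc(ref);	/* was: ref->count++ */
	spin_unlock(&ref->lock);
}
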
84770diff --git a/include/linux/math64.h b/include/linux/math64.h
84771index c45c089..298841c 100644
84772--- a/include/linux/math64.h
84773+++ b/include/linux/math64.h
84774@@ -15,7 +15,7 @@
84775 * This is commonly provided by 32bit archs to provide an optimized 64bit
84776 * divide.
84777 */
84778-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84779+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84780 {
84781 *remainder = dividend % divisor;
84782 return dividend / divisor;
84783@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
84784 /**
84785 * div64_u64 - unsigned 64bit divide with 64bit divisor
84786 */
84787-static inline u64 div64_u64(u64 dividend, u64 divisor)
84788+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
84789 {
84790 return dividend / divisor;
84791 }
84792@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
84793 #define div64_ul(x, y) div_u64((x), (y))
84794
84795 #ifndef div_u64_rem
84796-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84797+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84798 {
84799 *remainder = do_div(dividend, divisor);
84800 return dividend;
84801@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
84802 #endif
84803
84804 #ifndef div64_u64
84805-extern u64 div64_u64(u64 dividend, u64 divisor);
84806+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
84807 #endif
84808
84809 #ifndef div64_s64
84810@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
84811 * divide.
84812 */
84813 #ifndef div_u64
84814-static inline u64 div_u64(u64 dividend, u32 divisor)
84815+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
84816 {
84817 u32 remainder;
84818 return div_u64_rem(dividend, divisor, &remainder);
84819diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
84820index 3d385c8..deacb6a 100644
84821--- a/include/linux/mempolicy.h
84822+++ b/include/linux/mempolicy.h
84823@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
84824 }
84825
84826 #define vma_policy(vma) ((vma)->vm_policy)
84827+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84828+{
84829+ vma->vm_policy = pol;
84830+}
84831
84832 static inline void mpol_get(struct mempolicy *pol)
84833 {
84834@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
84835 }
84836
84837 #define vma_policy(vma) NULL
84838+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84839+{
84840+}
84841
84842 static inline int
84843 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
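
set_vma_policy() gives callers a single spelling for assigning a VMA's NUMA policy; the !CONFIG_NUMA stub immediately above compiles to nothing, keeping #ifdefs out of mm code. A hypothetical caller:

/* hypothetical caller: no CONFIG_NUMA conditional needed at the call site */
static void demo_assign_policy(struct vm_area_struct *vma, struct mempolicy *pol)
{
	set_vma_policy(vma, pol);
}
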
84844diff --git a/include/linux/mm.h b/include/linux/mm.h
84845index 47a9392..ef645bc 100644
84846--- a/include/linux/mm.h
84847+++ b/include/linux/mm.h
84848@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
84849
84850 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
84851 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
84852+
84853+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
84854+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
84855+#endif
84856+
84857 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
84858 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
84859 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
84860@@ -254,8 +259,8 @@ struct vm_operations_struct {
84861 /* called by access_process_vm when get_user_pages() fails, typically
84862 * for use by special VMAs that can switch between memory and hardware
84863 */
84864- int (*access)(struct vm_area_struct *vma, unsigned long addr,
84865- void *buf, int len, int write);
84866+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
84867+ void *buf, size_t len, int write);
84868
84869 /* Called by the /proc/PID/maps code to ask the vma whether it
84870 * has a special name. Returning non-NULL will also cause this
84871@@ -293,6 +298,7 @@ struct vm_operations_struct {
84872 struct page *(*find_special_page)(struct vm_area_struct *vma,
84873 unsigned long addr);
84874 };
84875+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
84876
84877 struct mmu_gather;
84878 struct inode;
84879@@ -1213,8 +1219,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
84880 unsigned long *pfn);
84881 int follow_phys(struct vm_area_struct *vma, unsigned long address,
84882 unsigned int flags, unsigned long *prot, resource_size_t *phys);
84883-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84884- void *buf, int len, int write);
84885+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84886+ void *buf, size_t len, int write);
84887
84888 static inline void unmap_shared_mapping_range(struct address_space *mapping,
84889 loff_t const holebegin, loff_t const holelen)
84890@@ -1254,9 +1260,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
84891 }
84892 #endif
84893
84894-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
84895-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84896- void *buf, int len, int write);
84897+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
84898+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84899+ void *buf, size_t len, int write);
84900
84901 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84902 unsigned long start, unsigned long nr_pages,
84903@@ -1299,34 +1305,6 @@ int set_page_dirty_lock(struct page *page);
84904 int clear_page_dirty_for_io(struct page *page);
84905 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
84906
84907-/* Is the vma a continuation of the stack vma above it? */
84908-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
84909-{
84910- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
84911-}
84912-
84913-static inline int stack_guard_page_start(struct vm_area_struct *vma,
84914- unsigned long addr)
84915-{
84916- return (vma->vm_flags & VM_GROWSDOWN) &&
84917- (vma->vm_start == addr) &&
84918- !vma_growsdown(vma->vm_prev, addr);
84919-}
84920-
84921-/* Is the vma a continuation of the stack vma below it? */
84922-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
84923-{
84924- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
84925-}
84926-
84927-static inline int stack_guard_page_end(struct vm_area_struct *vma,
84928- unsigned long addr)
84929-{
84930- return (vma->vm_flags & VM_GROWSUP) &&
84931- (vma->vm_end == addr) &&
84932- !vma_growsup(vma->vm_next, addr);
84933-}
84934-
84935 extern struct task_struct *task_of_stack(struct task_struct *task,
84936 struct vm_area_struct *vma, bool in_group);
84937
84938@@ -1449,8 +1427,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
84939 {
84940 return 0;
84941 }
84942+
84943+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
84944+ unsigned long address)
84945+{
84946+ return 0;
84947+}
84948 #else
84949 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84950+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84951 #endif
84952
84953 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
84954@@ -1460,6 +1445,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
84955 return 0;
84956 }
84957
84958+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
84959+ unsigned long address)
84960+{
84961+ return 0;
84962+}
84963+
84964 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
84965
84966 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
84967@@ -1472,6 +1463,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
84968
84969 #else
84970 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
84971+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
84972
84973 static inline void mm_nr_pmds_init(struct mm_struct *mm)
84974 {
84975@@ -1509,11 +1501,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
84976 NULL: pud_offset(pgd, address);
84977 }
84978
84979+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
84980+{
84981+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
84982+ NULL: pud_offset(pgd, address);
84983+}
84984+
84985 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
84986 {
84987 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
84988 NULL: pmd_offset(pud, address);
84989 }
84990+
84991+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
84992+{
84993+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
84994+ NULL: pmd_offset(pud, address);
84995+}
84996 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
84997
84998 #if USE_SPLIT_PTE_PTLOCKS
84999@@ -1890,12 +1894,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
85000 bool *need_rmap_locks);
85001 extern void exit_mmap(struct mm_struct *);
85002
85003+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
85004+extern void gr_learn_resource(const struct task_struct *task, const int res,
85005+ const unsigned long wanted, const int gt);
85006+#else
85007+static inline void gr_learn_resource(const struct task_struct *task, const int res,
85008+ const unsigned long wanted, const int gt)
85009+{
85010+}
85011+#endif
85012+
85013 static inline int check_data_rlimit(unsigned long rlim,
85014 unsigned long new,
85015 unsigned long start,
85016 unsigned long end_data,
85017 unsigned long start_data)
85018 {
85019+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
85020 if (rlim < RLIM_INFINITY) {
85021 if (((new - start) + (end_data - start_data)) > rlim)
85022 return -ENOSPC;
85023@@ -1920,7 +1935,7 @@ extern int install_special_mapping(struct mm_struct *mm,
85024 unsigned long addr, unsigned long len,
85025 unsigned long flags, struct page **pages);
85026
85027-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
85028+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
85029
85030 extern unsigned long mmap_region(struct file *file, unsigned long addr,
85031 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
85032@@ -1928,6 +1943,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
85033 unsigned long len, unsigned long prot, unsigned long flags,
85034 unsigned long pgoff, unsigned long *populate);
85035 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
85036+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
85037
85038 #ifdef CONFIG_MMU
85039 extern int __mm_populate(unsigned long addr, unsigned long len,
85040@@ -1956,10 +1972,11 @@ struct vm_unmapped_area_info {
85041 unsigned long high_limit;
85042 unsigned long align_mask;
85043 unsigned long align_offset;
85044+ unsigned long threadstack_offset;
85045 };
85046
85047-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
85048-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
85049+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
85050+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
85051
85052 /*
85053 * Search for an unmapped address range.
85054@@ -1971,7 +1988,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
85055 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
85056 */
85057 static inline unsigned long
85058-vm_unmapped_area(struct vm_unmapped_area_info *info)
85059+vm_unmapped_area(const struct vm_unmapped_area_info *info)
85060 {
85061 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
85062 return unmapped_area(info);
85063@@ -2033,6 +2050,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
85064 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
85065 struct vm_area_struct **pprev);
85066
85067+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
85068+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
85069+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
85070+
85071 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
85072 NULL if none. Assume start_addr < end_addr. */
85073 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
85074@@ -2062,10 +2083,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
85075 }
85076
85077 #ifdef CONFIG_MMU
85078-pgprot_t vm_get_page_prot(unsigned long vm_flags);
85079+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
85080 void vma_set_page_prot(struct vm_area_struct *vma);
85081 #else
85082-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
85083+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
85084 {
85085 return __pgprot(0);
85086 }
85087@@ -2127,6 +2148,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
85088 static inline void vm_stat_account(struct mm_struct *mm,
85089 unsigned long flags, struct file *file, long pages)
85090 {
85091+
85092+#ifdef CONFIG_PAX_RANDMMAP
85093+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
85094+#endif
85095+
85096 mm->total_vm += pages;
85097 }
85098 #endif /* CONFIG_PROC_FS */
85099@@ -2229,7 +2255,7 @@ extern int unpoison_memory(unsigned long pfn);
85100 extern int sysctl_memory_failure_early_kill;
85101 extern int sysctl_memory_failure_recovery;
85102 extern void shake_page(struct page *p, int access);
85103-extern atomic_long_t num_poisoned_pages;
85104+extern atomic_long_unchecked_t num_poisoned_pages;
85105 extern int soft_offline_page(struct page *page, int flags);
85106
85107 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
85108@@ -2280,5 +2306,11 @@ void __init setup_nr_node_ids(void);
85109 static inline void setup_nr_node_ids(void) {}
85110 #endif
85111
85112+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
85113+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
85114+#else
85115+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
85116+#endif
85117+
85118 #endif /* __KERNEL__ */
85119 #endif /* _LINUX_MM_H */
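
The gr_learn_resource() call added to check_data_rlimit() above shows the RBAC learning pattern: the request is recorded before the limit is enforced, so learning mode observes the process's true demand whether or not the current rlimit would permit it. A standalone sketch of that ordering, with the hook stubbed (the real one is a no-op unless RBAC learning support is configured):

#include <errno.h>

static void demo_learn_resource(unsigned long wanted) { }	/* stand-in hook */

static int demo_check_limit(unsigned long rlim, unsigned long wanted)
{
	demo_learn_resource(wanted);	/* record first ... */
	if (wanted > rlim)		/* ... then enforce */
		return -ENOSPC;
	return 0;
}

int main(void)
{
	return demo_check_limit(1UL << 20, 4096UL);
}
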
85120diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
85121index 199a03a..7328440 100644
85122--- a/include/linux/mm_types.h
85123+++ b/include/linux/mm_types.h
85124@@ -313,7 +313,9 @@ struct vm_area_struct {
85125 #ifdef CONFIG_NUMA
85126 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
85127 #endif
85128-};
85129+
85130+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
85131+} __randomize_layout;
85132
85133 struct core_thread {
85134 struct task_struct *task;
85135@@ -464,7 +466,25 @@ struct mm_struct {
85136 /* address of the bounds directory */
85137 void __user *bd_addr;
85138 #endif
85139-};
85140+
85141+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85142+ unsigned long pax_flags;
85143+#endif
85144+
85145+#ifdef CONFIG_PAX_DLRESOLVE
85146+ unsigned long call_dl_resolve;
85147+#endif
85148+
85149+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
85150+ unsigned long call_syscall;
85151+#endif
85152+
85153+#ifdef CONFIG_PAX_ASLR
85154+ unsigned long delta_mmap; /* randomized offset */
85155+ unsigned long delta_stack; /* randomized offset */
85156+#endif
85157+
85158+} __randomize_layout;
85159
85160 static inline void mm_init_cpumask(struct mm_struct *mm)
85161 {
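
The new mm_struct fields carry per-process PaX state; delta_mmap and delta_stack hold the ASLR offsets chosen at exec time. A toy userspace sketch of how a page-aligned delta can bias a mapping base follows; the entropy mask, shift width and base address are assumptions for the demo, not the PaX constants:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT_DEMO 12

int main(void)
{
        srand(42); /* fixed seed so the sketch is reproducible */

        /* a page-aligned random delta, as delta_mmap would hold */
        unsigned long delta_mmap = ((unsigned long)rand() & 0xffffUL) << PAGE_SHIFT_DEMO;
        unsigned long mmap_base  = 0x7f0000000000UL - delta_mmap; /* illustrative base */

        printf("delta_mmap = %#lx -> mmap base = %#lx\n", delta_mmap, mmap_base);
        return 0;
}
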
85162diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
85163index 160448f..7b332b7 100644
85164--- a/include/linux/mmc/core.h
85165+++ b/include/linux/mmc/core.h
85166@@ -79,7 +79,7 @@ struct mmc_command {
85167 #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
85168
85169 unsigned int retries; /* max number of retries */
85170- unsigned int error; /* command error */
85171+ int error; /* command error */
85172
85173 /*
85174 * Standard errno values are used for errors, but some have specific
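
The mmc_command.error type change matters because the field holds negative errno values: stored in an unsigned int they wrap to huge positives, so an "err < 0" check can never fire. Standalone demonstration:

#include <stdio.h>
#include <errno.h>

int main(void)
{
        unsigned int uerr = -ETIMEDOUT; /* wraps to 4294967186 */
        int          serr = -ETIMEDOUT;

        printf("unsigned: %u, (uerr < 0) = %d\n", uerr, uerr < 0); /* always 0 */
        printf("signed:   %d, (serr < 0) = %d\n", serr, serr < 0); /* 1 */
        return 0;
}
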
85175diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
85176index c5d5278..f0b68c8 100644
85177--- a/include/linux/mmiotrace.h
85178+++ b/include/linux/mmiotrace.h
85179@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
85180 /* Called from ioremap.c */
85181 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
85182 void __iomem *addr);
85183-extern void mmiotrace_iounmap(volatile void __iomem *addr);
85184+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
85185
85186 /* For anyone to insert markers. Remember trailing newline. */
85187 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
85188@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
85189 {
85190 }
85191
85192-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
85193+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
85194 {
85195 }
85196
85197diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
85198index 2782df4..abe756e 100644
85199--- a/include/linux/mmzone.h
85200+++ b/include/linux/mmzone.h
85201@@ -526,7 +526,7 @@ struct zone {
85202
85203 ZONE_PADDING(_pad3_)
85204 /* Zone statistics */
85205- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85206+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
85207 } ____cacheline_internodealigned_in_smp;
85208
85209 enum zone_flags {
85210diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
85211index e530533..c9620c7 100644
85212--- a/include/linux/mod_devicetable.h
85213+++ b/include/linux/mod_devicetable.h
85214@@ -139,7 +139,7 @@ struct usb_device_id {
85215 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
85216 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
85217
85218-#define HID_ANY_ID (~0)
85219+#define HID_ANY_ID (~0U)
85220 #define HID_BUS_ANY 0xffff
85221 #define HID_GROUP_ANY 0x0000
85222
85223@@ -470,7 +470,7 @@ struct dmi_system_id {
85224 const char *ident;
85225 struct dmi_strmatch matches[4];
85226 void *driver_data;
85227-};
85228+} __do_const;
85229 /*
85230 * struct dmi_device_id appears during expansion of
85231 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
85232diff --git a/include/linux/module.h b/include/linux/module.h
85233index b03485b..a26974f 100644
85234--- a/include/linux/module.h
85235+++ b/include/linux/module.h
85236@@ -17,9 +17,11 @@
85237 #include <linux/moduleparam.h>
85238 #include <linux/jump_label.h>
85239 #include <linux/export.h>
85240+#include <linux/fs.h>
85241
85242 #include <linux/percpu.h>
85243 #include <asm/module.h>
85244+#include <asm/pgtable.h>
85245
85246 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
85247 #define MODULE_SIG_STRING "~Module signature appended~\n"
85248@@ -42,7 +44,7 @@ struct module_kobject {
85249 struct kobject *drivers_dir;
85250 struct module_param_attrs *mp;
85251 struct completion *kobj_completion;
85252-};
85253+} __randomize_layout;
85254
85255 struct module_attribute {
85256 struct attribute attr;
85257@@ -54,12 +56,13 @@ struct module_attribute {
85258 int (*test)(struct module *);
85259 void (*free)(struct module *);
85260 };
85261+typedef struct module_attribute __no_const module_attribute_no_const;
85262
85263 struct module_version_attribute {
85264 struct module_attribute mattr;
85265 const char *module_name;
85266 const char *version;
85267-} __attribute__ ((__aligned__(sizeof(void *))));
85268+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
85269
85270 extern ssize_t __modver_version_show(struct module_attribute *,
85271 struct module_kobject *, char *);
85272@@ -221,7 +224,7 @@ struct module {
85273
85274 /* Sysfs stuff. */
85275 struct module_kobject mkobj;
85276- struct module_attribute *modinfo_attrs;
85277+ module_attribute_no_const *modinfo_attrs;
85278 const char *version;
85279 const char *srcversion;
85280 struct kobject *holders_dir;
85281@@ -270,19 +273,16 @@ struct module {
85282 int (*init)(void);
85283
85284 /* If this is non-NULL, vfree after init() returns */
85285- void *module_init;
85286+ void *module_init_rx, *module_init_rw;
85287
85288 /* Here is the actual code + data, vfree'd on unload. */
85289- void *module_core;
85290+ void *module_core_rx, *module_core_rw;
85291
85292 /* Here are the sizes of the init and core sections */
85293- unsigned int init_size, core_size;
85294+ unsigned int init_size_rw, core_size_rw;
85295
85296 /* The size of the executable code in each section. */
85297- unsigned int init_text_size, core_text_size;
85298-
85299- /* Size of RO sections of the module (text+rodata) */
85300- unsigned int init_ro_size, core_ro_size;
85301+ unsigned int init_size_rx, core_size_rx;
85302
85303 /* Arch-specific module values */
85304 struct mod_arch_specific arch;
85305@@ -338,6 +338,10 @@ struct module {
85306 #ifdef CONFIG_EVENT_TRACING
85307 struct ftrace_event_call **trace_events;
85308 unsigned int num_trace_events;
85309+ struct file_operations trace_id;
85310+ struct file_operations trace_enable;
85311+ struct file_operations trace_format;
85312+ struct file_operations trace_filter;
85313 #endif
85314 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
85315 unsigned int num_ftrace_callsites;
85316@@ -365,7 +369,7 @@ struct module {
85317 ctor_fn_t *ctors;
85318 unsigned int num_ctors;
85319 #endif
85320-};
85321+} __randomize_layout;
85322 #ifndef MODULE_ARCH_INIT
85323 #define MODULE_ARCH_INIT {}
85324 #endif
85325@@ -386,18 +390,48 @@ bool is_module_address(unsigned long addr);
85326 bool is_module_percpu_address(unsigned long addr);
85327 bool is_module_text_address(unsigned long addr);
85328
85329+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
85330+{
85331+
85332+#ifdef CONFIG_PAX_KERNEXEC
85333+ if (ktla_ktva(addr) >= (unsigned long)start &&
85334+ ktla_ktva(addr) < (unsigned long)start + size)
85335+ return 1;
85336+#endif
85337+
85338+ return ((void *)addr >= start && (void *)addr < start + size);
85339+}
85340+
85341+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
85342+{
85343+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
85344+}
85345+
85346+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
85347+{
85348+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
85349+}
85350+
85351+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
85352+{
85353+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
85354+}
85355+
85356+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
85357+{
85358+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
85359+}
85360+
85361 static inline bool within_module_core(unsigned long addr,
85362 const struct module *mod)
85363 {
85364- return (unsigned long)mod->module_core <= addr &&
85365- addr < (unsigned long)mod->module_core + mod->core_size;
85366+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
85367 }
85368
85369 static inline bool within_module_init(unsigned long addr,
85370 const struct module *mod)
85371 {
85372- return (unsigned long)mod->module_init <= addr &&
85373- addr < (unsigned long)mod->module_init + mod->init_size;
85374+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
85375 }
85376
85377 static inline bool within_module(unsigned long addr, const struct module *mod)
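
The single module_core/module_init regions are split into rx/rw pairs so that KERNEXEC can map code and data with different permissions, and the within_module_*() helpers then test both halves. A userspace sketch of the two-range membership check, using an illustrative fake_module layout rather than the real struct module:

#include <stdio.h>

struct fake_module {                     /* illustrative layout only */
        void *core_rx; unsigned long size_rx;
        void *core_rw; unsigned long size_rw;
};

static int within_range(unsigned long addr, void *start, unsigned long size)
{
        return (char *)addr >= (char *)start &&
               (char *)addr <  (char *)start + size;
}

static int within_core(unsigned long addr, const struct fake_module *mod)
{
        /* an address belongs to the module if it is in either half */
        return within_range(addr, mod->core_rx, mod->size_rx) ||
               within_range(addr, mod->core_rw, mod->size_rw);
}

int main(void)
{
        static char rx[64], rw[64];
        struct fake_module m = { rx, sizeof rx, rw, sizeof rw };

        printf("%d %d %d\n",
               within_core((unsigned long)&rx[10], &m),  /* 1 */
               within_core((unsigned long)&rw[10], &m),  /* 1 */
               within_core((unsigned long)&m, &m));      /* 0 */
        return 0;
}
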
85378diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
85379index 4d0cb9b..3169ac7 100644
85380--- a/include/linux/moduleloader.h
85381+++ b/include/linux/moduleloader.h
85382@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
85383 sections. Returns NULL on failure. */
85384 void *module_alloc(unsigned long size);
85385
85386+#ifdef CONFIG_PAX_KERNEXEC
85387+void *module_alloc_exec(unsigned long size);
85388+#else
85389+#define module_alloc_exec(x) module_alloc(x)
85390+#endif
85391+
85392 /* Free memory returned from module_alloc. */
85393 void module_memfree(void *module_region);
85394
85395+#ifdef CONFIG_PAX_KERNEXEC
85396+void module_memfree_exec(void *module_region);
85397+#else
85398+#define module_memfree_exec(x) module_memfree((x))
85399+#endif
85400+
85401 /*
85402 * Apply the given relocation to the (simplified) ELF. Return -error
85403 * or 0.
85404@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
85405 unsigned int relsec,
85406 struct module *me)
85407 {
85408+#ifdef CONFIG_MODULES
85409 printk(KERN_ERR "module %s: REL relocation unsupported\n",
85410 module_name(me));
85411+#endif
85412 return -ENOEXEC;
85413 }
85414 #endif
85415@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
85416 unsigned int relsec,
85417 struct module *me)
85418 {
85419+#ifdef CONFIG_MODULES
85420 printk(KERN_ERR "module %s: REL relocation unsupported\n",
85421 module_name(me));
85422+#endif
85423 return -ENOEXEC;
85424 }
85425 #endif
85426diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
85427index 1c9effa..1160bdd 100644
85428--- a/include/linux/moduleparam.h
85429+++ b/include/linux/moduleparam.h
85430@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
85431 * @len is usually just sizeof(string).
85432 */
85433 #define module_param_string(name, string, len, perm) \
85434- static const struct kparam_string __param_string_##name \
85435+ static const struct kparam_string __param_string_##name __used \
85436 = { len, string }; \
85437 __module_param_call(MODULE_PARAM_PREFIX, name, \
85438 &param_ops_string, \
85439@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
85440 */
85441 #define module_param_array_named(name, array, type, nump, perm) \
85442 param_check_##type(name, &(array)[0]); \
85443- static const struct kparam_array __param_arr_##name \
85444+ static const struct kparam_array __param_arr_##name __used \
85445 = { .max = ARRAY_SIZE(array), .num = nump, \
85446 .ops = &param_ops_##type, \
85447 .elemsize = sizeof(array[0]), .elem = array }; \
85448diff --git a/include/linux/mount.h b/include/linux/mount.h
85449index 564beee..653be6f 100644
85450--- a/include/linux/mount.h
85451+++ b/include/linux/mount.h
85452@@ -67,7 +67,7 @@ struct vfsmount {
85453 struct dentry *mnt_root; /* root of the mounted tree */
85454 struct super_block *mnt_sb; /* pointer to superblock */
85455 int mnt_flags;
85456-};
85457+} __randomize_layout;
85458
85459 struct file; /* forward dec */
85460 struct path;
85461diff --git a/include/linux/namei.h b/include/linux/namei.h
85462index c899077..b9a2010 100644
85463--- a/include/linux/namei.h
85464+++ b/include/linux/namei.h
85465@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
85466 extern void unlock_rename(struct dentry *, struct dentry *);
85467
85468 extern void nd_jump_link(struct nameidata *nd, struct path *path);
85469-extern void nd_set_link(struct nameidata *nd, char *path);
85470-extern char *nd_get_link(struct nameidata *nd);
85471+extern void nd_set_link(struct nameidata *nd, const char *path);
85472+extern const char *nd_get_link(const struct nameidata *nd);
85473
85474 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
85475 {
85476diff --git a/include/linux/net.h b/include/linux/net.h
85477index 17d8339..81656c0 100644
85478--- a/include/linux/net.h
85479+++ b/include/linux/net.h
85480@@ -192,7 +192,7 @@ struct net_proto_family {
85481 int (*create)(struct net *net, struct socket *sock,
85482 int protocol, int kern);
85483 struct module *owner;
85484-};
85485+} __do_const;
85486
85487 struct iovec;
85488 struct kvec;
85489diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
85490index 2787388..1dd8e88 100644
85491--- a/include/linux/netdevice.h
85492+++ b/include/linux/netdevice.h
85493@@ -1198,6 +1198,7 @@ struct net_device_ops {
85494 u8 state);
85495 #endif
85496 };
85497+typedef struct net_device_ops __no_const net_device_ops_no_const;
85498
85499 /**
85500 * enum net_device_priv_flags - &struct net_device priv_flags
85501@@ -1546,10 +1547,10 @@ struct net_device {
85502
85503 struct net_device_stats stats;
85504
85505- atomic_long_t rx_dropped;
85506- atomic_long_t tx_dropped;
85507+ atomic_long_unchecked_t rx_dropped;
85508+ atomic_long_unchecked_t tx_dropped;
85509
85510- atomic_t carrier_changes;
85511+ atomic_unchecked_t carrier_changes;
85512
85513 #ifdef CONFIG_WIRELESS_EXT
85514 const struct iw_handler_def * wireless_handlers;
85515diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
85516index 2517ece..0bbfcfb 100644
85517--- a/include/linux/netfilter.h
85518+++ b/include/linux/netfilter.h
85519@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
85520 #endif
85521 /* Use the module struct to lock set/get code in place */
85522 struct module *owner;
85523-};
85524+} __do_const;
85525
85526 /* Function to register/unregister hook points. */
85527 int nf_register_hook(struct nf_hook_ops *reg);
85528diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
85529index e955d47..04a5338 100644
85530--- a/include/linux/netfilter/nfnetlink.h
85531+++ b/include/linux/netfilter/nfnetlink.h
85532@@ -19,7 +19,7 @@ struct nfnl_callback {
85533 const struct nlattr * const cda[]);
85534 const struct nla_policy *policy; /* netlink attribute policy */
85535 const u_int16_t attr_count; /* number of nlattr's */
85536-};
85537+} __do_const;
85538
85539 struct nfnetlink_subsystem {
85540 const char *name;
85541diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
85542new file mode 100644
85543index 0000000..33f4af8
85544--- /dev/null
85545+++ b/include/linux/netfilter/xt_gradm.h
85546@@ -0,0 +1,9 @@
85547+#ifndef _LINUX_NETFILTER_XT_GRADM_H
85548+#define _LINUX_NETFILTER_XT_GRADM_H 1
85549+
85550+struct xt_gradm_mtinfo {
85551+ __u16 flags;
85552+ __u16 invflags;
85553+};
85554+
85555+#endif
85556diff --git a/include/linux/nls.h b/include/linux/nls.h
85557index 520681b..2b7fabb 100644
85558--- a/include/linux/nls.h
85559+++ b/include/linux/nls.h
85560@@ -31,7 +31,7 @@ struct nls_table {
85561 const unsigned char *charset2upper;
85562 struct module *owner;
85563 struct nls_table *next;
85564-};
85565+} __do_const;
85566
85567 /* this value hold the maximum octet of charset */
85568 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
85569@@ -46,7 +46,7 @@ enum utf16_endian {
85570 /* nls_base.c */
85571 extern int __register_nls(struct nls_table *, struct module *);
85572 extern int unregister_nls(struct nls_table *);
85573-extern struct nls_table *load_nls(char *);
85574+extern struct nls_table *load_nls(const char *);
85575 extern void unload_nls(struct nls_table *);
85576 extern struct nls_table *load_nls_default(void);
85577 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
85578diff --git a/include/linux/notifier.h b/include/linux/notifier.h
85579index d14a4c3..a078786 100644
85580--- a/include/linux/notifier.h
85581+++ b/include/linux/notifier.h
85582@@ -54,7 +54,8 @@ struct notifier_block {
85583 notifier_fn_t notifier_call;
85584 struct notifier_block __rcu *next;
85585 int priority;
85586-};
85587+} __do_const;
85588+typedef struct notifier_block __no_const notifier_block_no_const;
85589
85590 struct atomic_notifier_head {
85591 spinlock_t lock;
85592diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
85593index b2a0f15..4d7da32 100644
85594--- a/include/linux/oprofile.h
85595+++ b/include/linux/oprofile.h
85596@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
85597 int oprofilefs_create_ro_ulong(struct dentry * root,
85598 char const * name, ulong * val);
85599
85600-/** Create a file for read-only access to an atomic_t. */
85601+/** Create a file for read-only access to an atomic_unchecked_t. */
85602 int oprofilefs_create_ro_atomic(struct dentry * root,
85603- char const * name, atomic_t * val);
85604+ char const * name, atomic_unchecked_t * val);
85605
85606 /** create a directory */
85607 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
85608diff --git a/include/linux/padata.h b/include/linux/padata.h
85609index 4386946..f50c615 100644
85610--- a/include/linux/padata.h
85611+++ b/include/linux/padata.h
85612@@ -129,7 +129,7 @@ struct parallel_data {
85613 struct padata_serial_queue __percpu *squeue;
85614 atomic_t reorder_objects;
85615 atomic_t refcnt;
85616- atomic_t seq_nr;
85617+ atomic_unchecked_t seq_nr;
85618 struct padata_cpumask cpumask;
85619 spinlock_t lock ____cacheline_aligned;
85620 unsigned int processed;
85621diff --git a/include/linux/path.h b/include/linux/path.h
85622index d137218..be0c176 100644
85623--- a/include/linux/path.h
85624+++ b/include/linux/path.h
85625@@ -1,13 +1,15 @@
85626 #ifndef _LINUX_PATH_H
85627 #define _LINUX_PATH_H
85628
85629+#include <linux/compiler.h>
85630+
85631 struct dentry;
85632 struct vfsmount;
85633
85634 struct path {
85635 struct vfsmount *mnt;
85636 struct dentry *dentry;
85637-};
85638+} __randomize_layout;
85639
85640 extern void path_get(const struct path *);
85641 extern void path_put(const struct path *);
85642diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
85643index 8c78950..0d74ed9 100644
85644--- a/include/linux/pci_hotplug.h
85645+++ b/include/linux/pci_hotplug.h
85646@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
85647 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
85648 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
85649 int (*reset_slot) (struct hotplug_slot *slot, int probe);
85650-};
85651+} __do_const;
85652+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
85653
85654 /**
85655 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
85656diff --git a/include/linux/percpu.h b/include/linux/percpu.h
85657index caebf2a..4c3ae9d 100644
85658--- a/include/linux/percpu.h
85659+++ b/include/linux/percpu.h
85660@@ -34,7 +34,7 @@
85661 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
85662 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
85663 */
85664-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
85665+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
85666 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
85667
85668 /*
85669diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
85670index 2b62198..2b74233 100644
85671--- a/include/linux/perf_event.h
85672+++ b/include/linux/perf_event.h
85673@@ -343,8 +343,8 @@ struct perf_event {
85674
85675 enum perf_event_active_state state;
85676 unsigned int attach_state;
85677- local64_t count;
85678- atomic64_t child_count;
85679+ local64_t count; /* PaX: fix it one day */
85680+ atomic64_unchecked_t child_count;
85681
85682 /*
85683 * These are the total time in nanoseconds that the event
85684@@ -395,8 +395,8 @@ struct perf_event {
85685 * These accumulate total time (in nanoseconds) that children
85686 * events have been enabled and running, respectively.
85687 */
85688- atomic64_t child_total_time_enabled;
85689- atomic64_t child_total_time_running;
85690+ atomic64_unchecked_t child_total_time_enabled;
85691+ atomic64_unchecked_t child_total_time_running;
85692
85693 /*
85694 * Protect attach/detach and child_list:
85695@@ -752,7 +752,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
85696 entry->ip[entry->nr++] = ip;
85697 }
85698
85699-extern int sysctl_perf_event_paranoid;
85700+extern int sysctl_perf_event_legitimately_concerned;
85701 extern int sysctl_perf_event_mlock;
85702 extern int sysctl_perf_event_sample_rate;
85703 extern int sysctl_perf_cpu_time_max_percent;
85704@@ -767,19 +767,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
85705 loff_t *ppos);
85706
85707
85708+static inline bool perf_paranoid_any(void)
85709+{
85710+ return sysctl_perf_event_legitimately_concerned > 2;
85711+}
85712+
85713 static inline bool perf_paranoid_tracepoint_raw(void)
85714 {
85715- return sysctl_perf_event_paranoid > -1;
85716+ return sysctl_perf_event_legitimately_concerned > -1;
85717 }
85718
85719 static inline bool perf_paranoid_cpu(void)
85720 {
85721- return sysctl_perf_event_paranoid > 0;
85722+ return sysctl_perf_event_legitimately_concerned > 0;
85723 }
85724
85725 static inline bool perf_paranoid_kernel(void)
85726 {
85727- return sysctl_perf_event_paranoid > 1;
85728+ return sysctl_perf_event_legitimately_concerned > 1;
85729 }
85730
85731 extern void perf_event_init(void);
85732@@ -912,7 +917,7 @@ struct perf_pmu_events_attr {
85733 struct device_attribute attr;
85734 u64 id;
85735 const char *event_str;
85736-};
85737+} __do_const;
85738
85739 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
85740 char *page);
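
Besides the rename, the paranoia sysctl gains an extra rung: perf_paranoid_any() returns true above level 2, which grsecurity can use to shut perf off for unprivileged users entirely. Sketch of the threshold ladder at level 3:

#include <stdio.h>

static int paranoia_demo = 3; /* stands in for the renamed sysctl */

static int paranoid_tracepoint_raw(void) { return paranoia_demo > -1; }
static int paranoid_cpu(void)            { return paranoia_demo > 0;  }
static int paranoid_kernel(void)         { return paranoia_demo > 1;  }
static int paranoid_any(void)            { return paranoia_demo > 2;  } /* new rung */

int main(void)
{
        printf("raw=%d cpu=%d kernel=%d any=%d\n",
               paranoid_tracepoint_raw(), paranoid_cpu(),
               paranoid_kernel(), paranoid_any()); /* all 1 at level 3 */
        return 0;
}
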
85741diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
85742index 918b117..7af374b7 100644
85743--- a/include/linux/pid_namespace.h
85744+++ b/include/linux/pid_namespace.h
85745@@ -45,7 +45,7 @@ struct pid_namespace {
85746 int hide_pid;
85747 int reboot; /* group exit code if this pidns was rebooted */
85748 struct ns_common ns;
85749-};
85750+} __randomize_layout;
85751
85752 extern struct pid_namespace init_pid_ns;
85753
85754diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
85755index eb8b8ac..62649e1 100644
85756--- a/include/linux/pipe_fs_i.h
85757+++ b/include/linux/pipe_fs_i.h
85758@@ -47,10 +47,10 @@ struct pipe_inode_info {
85759 struct mutex mutex;
85760 wait_queue_head_t wait;
85761 unsigned int nrbufs, curbuf, buffers;
85762- unsigned int readers;
85763- unsigned int writers;
85764- unsigned int files;
85765- unsigned int waiting_writers;
85766+ atomic_t readers;
85767+ atomic_t writers;
85768+ atomic_t files;
85769+ atomic_t waiting_writers;
85770 unsigned int r_counter;
85771 unsigned int w_counter;
85772 struct page *tmp_page;
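
The pipe reader/writer/file counters become atomic_t so that concurrent open/release paths can adjust them without losing updates (and so they fall under the overflow-checked atomic ops). Userspace analogue with C11 atomics; the kernel side uses atomic_inc/atomic_dec:

#include <stdio.h>
#include <stdatomic.h>

static atomic_int readers_demo; /* plays the role of pipe->readers */

static void pipe_open_demo(void)    { atomic_fetch_add(&readers_demo, 1); }
static void pipe_release_demo(void) { atomic_fetch_sub(&readers_demo, 1); }

int main(void)
{
        pipe_open_demo();
        pipe_open_demo();
        pipe_release_demo();
        printf("readers = %d\n", atomic_load(&readers_demo)); /* 1 */
        return 0;
}
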
85773diff --git a/include/linux/pm.h b/include/linux/pm.h
85774index e2f1be6..78a0506 100644
85775--- a/include/linux/pm.h
85776+++ b/include/linux/pm.h
85777@@ -608,6 +608,7 @@ struct dev_pm_domain {
85778 struct dev_pm_ops ops;
85779 void (*detach)(struct device *dev, bool power_off);
85780 };
85781+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
85782
85783 /*
85784 * The PM_EVENT_ messages are also used by drivers implementing the legacy
85785diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
85786index 080e778..cbdaef7 100644
85787--- a/include/linux/pm_domain.h
85788+++ b/include/linux/pm_domain.h
85789@@ -39,11 +39,11 @@ struct gpd_dev_ops {
85790 int (*save_state)(struct device *dev);
85791 int (*restore_state)(struct device *dev);
85792 bool (*active_wakeup)(struct device *dev);
85793-};
85794+} __no_const;
85795
85796 struct gpd_cpuidle_data {
85797 unsigned int saved_exit_latency;
85798- struct cpuidle_state *idle_state;
85799+ cpuidle_state_no_const *idle_state;
85800 };
85801
85802 struct generic_pm_domain {
85803diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
85804index 30e84d4..22278b4 100644
85805--- a/include/linux/pm_runtime.h
85806+++ b/include/linux/pm_runtime.h
85807@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
85808
85809 static inline void pm_runtime_mark_last_busy(struct device *dev)
85810 {
85811- ACCESS_ONCE(dev->power.last_busy) = jiffies;
85812+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
85813 }
85814
85815 static inline bool pm_runtime_is_irq_safe(struct device *dev)
85816diff --git a/include/linux/pnp.h b/include/linux/pnp.h
85817index 6512e9c..ec27fa2 100644
85818--- a/include/linux/pnp.h
85819+++ b/include/linux/pnp.h
85820@@ -298,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
85821 struct pnp_fixup {
85822 char id[7];
85823 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
85824-};
85825+} __do_const;
85826
85827 /* config parameters */
85828 #define PNP_CONFIG_NORMAL 0x0001
85829diff --git a/include/linux/poison.h b/include/linux/poison.h
85830index 2110a81..13a11bb 100644
85831--- a/include/linux/poison.h
85832+++ b/include/linux/poison.h
85833@@ -19,8 +19,8 @@
85834 * under normal circumstances, used to verify that nobody uses
85835 * non-initialized list entries.
85836 */
85837-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
85838-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
85839+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
85840+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
85841
85842 /********** include/linux/timer.h **********/
85843 /*
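
The stock LIST_POISON values sit at low addresses that userland may be able to mmap() (subject to mmap_min_addr), so dereferencing a poisoned list pointer could be steered into attacker-controlled memory; the patch instead hardcodes values just below the 4 GiB mark, which on i386 land in the kernel half of the address space. Quick look at both encodings:

#include <stdio.h>

int main(void)
{
        /* stock value: low, potentially mmap()able by userland */
        void *old_poison1 = (void *)0x00100100;
        /* patched value: just below 4 GiB; kernel space on i386 */
        void *new_poison1 = (void *)(long)0xFFFFFF01;

        printf("old LIST_POISON1 = %p\n", old_poison1);
        printf("new LIST_POISON1 = %p\n", new_poison1);
        return 0;
}
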
85844diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
85845index d8b187c3..9a9257a 100644
85846--- a/include/linux/power/smartreflex.h
85847+++ b/include/linux/power/smartreflex.h
85848@@ -238,7 +238,7 @@ struct omap_sr_class_data {
85849 int (*notify)(struct omap_sr *sr, u32 status);
85850 u8 notify_flags;
85851 u8 class_type;
85852-};
85853+} __do_const;
85854
85855 /**
85856 * struct omap_sr_nvalue_table - Smartreflex n-target value info
85857diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
85858index 4ea1d37..80f4b33 100644
85859--- a/include/linux/ppp-comp.h
85860+++ b/include/linux/ppp-comp.h
85861@@ -84,7 +84,7 @@ struct compressor {
85862 struct module *owner;
85863 /* Extra skb space needed by the compressor algorithm */
85864 unsigned int comp_extra;
85865-};
85866+} __do_const;
85867
85868 /*
85869 * The return value from decompress routine is the length of the
85870diff --git a/include/linux/preempt.h b/include/linux/preempt.h
85871index de83b4e..c4b997d 100644
85872--- a/include/linux/preempt.h
85873+++ b/include/linux/preempt.h
85874@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
85875 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
85876 #endif
85877
85878+#define raw_preempt_count_add(val) __preempt_count_add(val)
85879+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
85880+
85881 #define __preempt_count_inc() __preempt_count_add(1)
85882 #define __preempt_count_dec() __preempt_count_sub(1)
85883
85884 #define preempt_count_inc() preempt_count_add(1)
85885+#define raw_preempt_count_inc() raw_preempt_count_add(1)
85886 #define preempt_count_dec() preempt_count_sub(1)
85887+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
85888
85889 #ifdef CONFIG_PREEMPT_COUNT
85890
85891@@ -41,6 +46,12 @@ do { \
85892 barrier(); \
85893 } while (0)
85894
85895+#define raw_preempt_disable() \
85896+do { \
85897+ raw_preempt_count_inc(); \
85898+ barrier(); \
85899+} while (0)
85900+
85901 #define sched_preempt_enable_no_resched() \
85902 do { \
85903 barrier(); \
85904@@ -49,6 +60,12 @@ do { \
85905
85906 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
85907
85908+#define raw_preempt_enable_no_resched() \
85909+do { \
85910+ barrier(); \
85911+ raw_preempt_count_dec(); \
85912+} while (0)
85913+
85914 #ifdef CONFIG_PREEMPT
85915 #define preempt_enable() \
85916 do { \
85917@@ -113,8 +130,10 @@ do { \
85918 * region.
85919 */
85920 #define preempt_disable() barrier()
85921+#define raw_preempt_disable() barrier()
85922 #define sched_preempt_enable_no_resched() barrier()
85923 #define preempt_enable_no_resched() barrier()
85924+#define raw_preempt_enable_no_resched() barrier()
85925 #define preempt_enable() barrier()
85926 #define preempt_check_resched() do { } while (0)
85927
85928@@ -128,11 +147,13 @@ do { \
85929 /*
85930 * Modules have no business playing preemption tricks.
85931 */
85932+#ifndef CONFIG_PAX_KERNEXEC
85933 #undef sched_preempt_enable_no_resched
85934 #undef preempt_enable_no_resched
85935 #undef preempt_enable_no_resched_notrace
85936 #undef preempt_check_resched
85937 #endif
85938+#endif
85939
85940 #define preempt_set_need_resched() \
85941 do { \
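
The raw_preempt_* macros mirror the normal helpers but call the __preempt_count_* primitives directly, bypassing the instrumented preempt_count_add/sub; the module #undef block is also kept out of the KERNEXEC case, presumably because PaX's own inline helpers in modules still need those symbols. Minimal sketch of the instrumented-versus-raw split (both _demo functions are stand-ins, not kernel API):

#include <stdio.h>

static int preempt_count_demo;
static int instrumented_calls;

/* stands in for preempt_count_add(): the instrumented path */
static void preempt_count_add_demo(int v)
{
        instrumented_calls++;
        preempt_count_demo += v;
}

/* stands in for __preempt_count_add(): the raw path the new macros use */
static void raw_preempt_count_add_demo(int v)
{
        preempt_count_demo += v;
}

int main(void)
{
        preempt_count_add_demo(1);     /* goes through instrumentation */
        raw_preempt_count_add_demo(1); /* does not */
        printf("count=%d instrumented=%d\n",
               preempt_count_demo, instrumented_calls); /* count=2 instrumented=1 */
        return 0;
}
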
85942diff --git a/include/linux/printk.h b/include/linux/printk.h
85943index baa3f97..168cff1 100644
85944--- a/include/linux/printk.h
85945+++ b/include/linux/printk.h
85946@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
85947 #endif
85948
85949 typedef int(*printk_func_t)(const char *fmt, va_list args);
85950+extern int kptr_restrict;
85951
85952 #ifdef CONFIG_PRINTK
85953 asmlinkage __printf(5, 0)
85954@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
85955
85956 extern int printk_delay_msec;
85957 extern int dmesg_restrict;
85958-extern int kptr_restrict;
85959
85960 extern void wake_up_klogd(void);
85961
85962diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
85963index b97bf2e..f14c92d4 100644
85964--- a/include/linux/proc_fs.h
85965+++ b/include/linux/proc_fs.h
85966@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
85967 extern struct proc_dir_entry *proc_symlink(const char *,
85968 struct proc_dir_entry *, const char *);
85969 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
85970+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
85971 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
85972 struct proc_dir_entry *, void *);
85973+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
85974+ struct proc_dir_entry *, void *);
85975 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
85976 struct proc_dir_entry *);
85977
85978@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
85979 return proc_create_data(name, mode, parent, proc_fops, NULL);
85980 }
85981
85982+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
85983+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
85984+{
85985+#ifdef CONFIG_GRKERNSEC_PROC_USER
85986+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
85987+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85988+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
85989+#else
85990+ return proc_create_data(name, mode, parent, proc_fops, NULL);
85991+#endif
85992+}
85993+
85994+
85995 extern void proc_set_size(struct proc_dir_entry *, loff_t);
85996 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
85997 extern void *PDE_DATA(const struct inode *);
85998@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
85999 struct proc_dir_entry *parent,const char *dest) { return NULL;}
86000 static inline struct proc_dir_entry *proc_mkdir(const char *name,
86001 struct proc_dir_entry *parent) {return NULL;}
86002+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
86003+ struct proc_dir_entry *parent) { return NULL; }
86004 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
86005 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
86006+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
86007+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
86008 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
86009 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
86010 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
86011@@ -79,7 +99,7 @@ struct net;
86012 static inline struct proc_dir_entry *proc_net_mkdir(
86013 struct net *net, const char *name, struct proc_dir_entry *parent)
86014 {
86015- return proc_mkdir_data(name, 0, parent, net);
86016+ return proc_mkdir_data_restrict(name, 0, parent, net);
86017 }
86018
86019 #endif /* _LINUX_PROC_FS_H */
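
proc_create_grsec() clamps the caller-supplied mode according to the GRKERNSEC_PROC_* policy: owner-only (0400) under PROC_USER, owner-plus-group (0440) under PROC_USERGROUP, and untouched otherwise; proc_net_mkdir() is likewise rerouted through the _restrict variant. Userspace sketch of the mode selection:

#include <stdio.h>

enum proc_policy_demo { PROC_OPEN, PROC_USER, PROC_USERGROUP };

static unsigned int grsec_proc_mode_demo(enum proc_policy_demo p,
                                         unsigned int requested)
{
        switch (p) {
        case PROC_USER:      return 0400; /* S_IRUSR */
        case PROC_USERGROUP: return 0440; /* S_IRUSR | S_IRGRP */
        default:             return requested;
        }
}

int main(void)
{
        printf("%o %o %o\n",
               grsec_proc_mode_demo(PROC_OPEN, 0644),       /* 644 */
               grsec_proc_mode_demo(PROC_USER, 0644),       /* 400 */
               grsec_proc_mode_demo(PROC_USERGROUP, 0644)); /* 440 */
        return 0;
}
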
86020diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
86021index 42dfc61..8113a99 100644
86022--- a/include/linux/proc_ns.h
86023+++ b/include/linux/proc_ns.h
86024@@ -16,7 +16,7 @@ struct proc_ns_operations {
86025 struct ns_common *(*get)(struct task_struct *task);
86026 void (*put)(struct ns_common *ns);
86027 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
86028-};
86029+} __do_const __randomize_layout;
86030
86031 extern const struct proc_ns_operations netns_operations;
86032 extern const struct proc_ns_operations utsns_operations;
86033diff --git a/include/linux/quota.h b/include/linux/quota.h
86034index d534e8e..782e604 100644
86035--- a/include/linux/quota.h
86036+++ b/include/linux/quota.h
86037@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
86038
86039 extern bool qid_eq(struct kqid left, struct kqid right);
86040 extern bool qid_lt(struct kqid left, struct kqid right);
86041-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
86042+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
86043 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
86044 extern bool qid_valid(struct kqid qid);
86045
86046diff --git a/include/linux/random.h b/include/linux/random.h
86047index b05856e..0a9f14e 100644
86048--- a/include/linux/random.h
86049+++ b/include/linux/random.h
86050@@ -9,9 +9,19 @@
86051 #include <uapi/linux/random.h>
86052
86053 extern void add_device_randomness(const void *, unsigned int);
86054+
86055+static inline void add_latent_entropy(void)
86056+{
86057+
86058+#ifdef LATENT_ENTROPY_PLUGIN
86059+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
86060+#endif
86061+
86062+}
86063+
86064 extern void add_input_randomness(unsigned int type, unsigned int code,
86065- unsigned int value);
86066-extern void add_interrupt_randomness(int irq, int irq_flags);
86067+ unsigned int value) __latent_entropy;
86068+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
86069
86070 extern void get_random_bytes(void *buf, int nbytes);
86071 extern void get_random_bytes_arch(void *buf, int nbytes);
86072@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
86073 extern const struct file_operations random_fops, urandom_fops;
86074 #endif
86075
86076-unsigned int get_random_int(void);
86077+unsigned int __intentional_overflow(-1) get_random_int(void);
86078 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
86079
86080-u32 prandom_u32(void);
86081+u32 prandom_u32(void) __intentional_overflow(-1);
86082 void prandom_bytes(void *buf, size_t nbytes);
86083 void prandom_seed(u32 seed);
86084 void prandom_reseed_late(void);
86085@@ -37,6 +47,11 @@ struct rnd_state {
86086 u32 prandom_u32_state(struct rnd_state *state);
86087 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
86088
86089+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
86090+{
86091+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
86092+}
86093+
86094 /**
86095 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
86096 * @ep_ro: right open interval endpoint
86097@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
86098 *
86099 * Returns: pseudo-random number in interval [0, ep_ro)
86100 */
86101-static inline u32 prandom_u32_max(u32 ep_ro)
86102+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
86103 {
86104 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
86105 }
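
pax_get_random_long() widens two 32-bit PRNG draws into a full long on 64-bit targets; on 32-bit the sizeof(long) > 4 test is constant-false, so the shift term is never evaluated. Userspace analogue using rand() in place of prandom_u32():

#include <stdio.h>
#include <stdlib.h>

static unsigned long get_random_long_demo(void)
{
        unsigned long lo = (unsigned int)rand();
        /* same shape as pax_get_random_long(): the high half only
         * exists (and is only evaluated) when long is wider than 4 bytes */
        return lo + (sizeof(long) > 4 ? (unsigned long)(unsigned int)rand() << 32 : 0);
}

int main(void)
{
        srand(1);
        printf("%#lx\n", get_random_long_demo());
        return 0;
}
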
86106diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
86107index 378c5ee..aa84a47 100644
86108--- a/include/linux/rbtree_augmented.h
86109+++ b/include/linux/rbtree_augmented.h
86110@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
86111 old->rbaugmented = rbcompute(old); \
86112 } \
86113 rbstatic const struct rb_augment_callbacks rbname = { \
86114- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
86115+ .propagate = rbname ## _propagate, \
86116+ .copy = rbname ## _copy, \
86117+ .rotate = rbname ## _rotate \
86118 };
86119
86120
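
The rb_augment_callbacks initializer switches from positional to designated form, so each function stays bound to the right slot even if the struct's fields are ever reordered (relevant once structures can be constified or layout-randomized). Standalone illustration:

#include <stdio.h>

struct callbacks_demo {
        void (*propagate)(void);
        void (*copy)(void);
        void (*rotate)(void);
};

static void prop_demo(void)   { puts("propagate"); }
static void copy_demo(void)   { puts("copy"); }
static void rotate_demo(void) { puts("rotate"); }

/* designated form: bindings survive any reordering of the fields above */
static const struct callbacks_demo cbs = {
        .propagate = prop_demo,
        .copy      = copy_demo,
        .rotate    = rotate_demo,
};

int main(void)
{
        cbs.propagate();
        cbs.rotate();
        return 0;
}
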
86121diff --git a/include/linux/rculist.h b/include/linux/rculist.h
86122index a18b16f..2683096 100644
86123--- a/include/linux/rculist.h
86124+++ b/include/linux/rculist.h
86125@@ -29,8 +29,8 @@
86126 */
86127 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
86128 {
86129- ACCESS_ONCE(list->next) = list;
86130- ACCESS_ONCE(list->prev) = list;
86131+ ACCESS_ONCE_RW(list->next) = list;
86132+ ACCESS_ONCE_RW(list->prev) = list;
86133 }
86134
86135 /*
86136@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
86137 struct list_head *prev, struct list_head *next);
86138 #endif
86139
86140+void __pax_list_add_rcu(struct list_head *new,
86141+ struct list_head *prev, struct list_head *next);
86142+
86143 /**
86144 * list_add_rcu - add a new entry to rcu-protected list
86145 * @new: new entry to be added
86146@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
86147 __list_add_rcu(new, head, head->next);
86148 }
86149
86150+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
86151+{
86152+ __pax_list_add_rcu(new, head, head->next);
86153+}
86154+
86155 /**
86156 * list_add_tail_rcu - add a new entry to rcu-protected list
86157 * @new: new entry to be added
86158@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
86159 __list_add_rcu(new, head->prev, head);
86160 }
86161
86162+static inline void pax_list_add_tail_rcu(struct list_head *new,
86163+ struct list_head *head)
86164+{
86165+ __pax_list_add_rcu(new, head->prev, head);
86166+}
86167+
86168 /**
86169 * list_del_rcu - deletes entry from list without re-initialization
86170 * @entry: the element to delete from the list.
86171@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
86172 entry->prev = LIST_POISON2;
86173 }
86174
86175+extern void pax_list_del_rcu(struct list_head *entry);
86176+
86177 /**
86178 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
86179 * @n: the element to delete from the hash list.
86180diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
86181index 7809749..1cd9315 100644
86182--- a/include/linux/rcupdate.h
86183+++ b/include/linux/rcupdate.h
86184@@ -333,7 +333,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
86185 do { \
86186 rcu_all_qs(); \
86187 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
86188- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
86189+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
86190 } while (0)
86191 #else /* #ifdef CONFIG_TASKS_RCU */
86192 #define TASKS_RCU(x) do { } while (0)
86193diff --git a/include/linux/reboot.h b/include/linux/reboot.h
86194index 67fc8fc..a90f7d8 100644
86195--- a/include/linux/reboot.h
86196+++ b/include/linux/reboot.h
86197@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
86198 */
86199
86200 extern void migrate_to_reboot_cpu(void);
86201-extern void machine_restart(char *cmd);
86202-extern void machine_halt(void);
86203-extern void machine_power_off(void);
86204+extern void machine_restart(char *cmd) __noreturn;
86205+extern void machine_halt(void) __noreturn;
86206+extern void machine_power_off(void) __noreturn;
86207
86208 extern void machine_shutdown(void);
86209 struct pt_regs;
86210@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
86211 */
86212
86213 extern void kernel_restart_prepare(char *cmd);
86214-extern void kernel_restart(char *cmd);
86215-extern void kernel_halt(void);
86216-extern void kernel_power_off(void);
86217+extern void kernel_restart(char *cmd) __noreturn;
86218+extern void kernel_halt(void) __noreturn;
86219+extern void kernel_power_off(void) __noreturn;
86220
86221 extern int C_A_D; /* for sysctl */
86222 void ctrl_alt_del(void);
86223@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
86224 * Emergency restart, callable from an interrupt handler.
86225 */
86226
86227-extern void emergency_restart(void);
86228+extern void emergency_restart(void) __noreturn;
86229 #include <asm/emergency-restart.h>
86230
86231 #endif /* _LINUX_REBOOT_H */
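
Annotating the halt/restart entry points __noreturn documents that control never comes back, lets the compiler prune dead code after the call, and flags callers that wrongly handle a return. Userspace analogue with the GCC attribute:

#include <stdio.h>
#include <stdlib.h>

static void machine_halt_demo(void) __attribute__((noreturn));

static void machine_halt_demo(void)
{
        puts("halting");
        exit(0); /* a noreturn function must never fall off the end */
}

int main(void)
{
        machine_halt_demo();
        /* unreachable: with the annotation the compiler knows this and
         * can elide anything placed here */
}
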
86232diff --git a/include/linux/regset.h b/include/linux/regset.h
86233index 8e0c9fe..ac4d221 100644
86234--- a/include/linux/regset.h
86235+++ b/include/linux/regset.h
86236@@ -161,7 +161,8 @@ struct user_regset {
86237 unsigned int align;
86238 unsigned int bias;
86239 unsigned int core_note_type;
86240-};
86241+} __do_const;
86242+typedef struct user_regset __no_const user_regset_no_const;
86243
86244 /**
86245 * struct user_regset_view - available regsets
86246diff --git a/include/linux/relay.h b/include/linux/relay.h
86247index d7c8359..818daf5 100644
86248--- a/include/linux/relay.h
86249+++ b/include/linux/relay.h
86250@@ -157,7 +157,7 @@ struct rchan_callbacks
86251 * The callback should return 0 if successful, negative if not.
86252 */
86253 int (*remove_buf_file)(struct dentry *dentry);
86254-};
86255+} __no_const;
86256
86257 /*
86258 * CONFIG_RELAY kernel API, kernel/relay.c
86259diff --git a/include/linux/rio.h b/include/linux/rio.h
86260index 6bda06f..bf39a9b 100644
86261--- a/include/linux/rio.h
86262+++ b/include/linux/rio.h
86263@@ -358,7 +358,7 @@ struct rio_ops {
86264 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
86265 u64 rstart, u32 size, u32 flags);
86266 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
86267-};
86268+} __no_const;
86269
86270 #define RIO_RESOURCE_MEM 0x00000100
86271 #define RIO_RESOURCE_DOORBELL 0x00000200
86272diff --git a/include/linux/rmap.h b/include/linux/rmap.h
86273index c4c559a..6ba9a26 100644
86274--- a/include/linux/rmap.h
86275+++ b/include/linux/rmap.h
86276@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
86277 void anon_vma_init(void); /* create anon_vma_cachep */
86278 int anon_vma_prepare(struct vm_area_struct *);
86279 void unlink_anon_vmas(struct vm_area_struct *);
86280-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
86281-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
86282+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
86283+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
86284
86285 static inline void anon_vma_merge(struct vm_area_struct *vma,
86286 struct vm_area_struct *next)
86287diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
86288index ed8f9e70..999bc96 100644
86289--- a/include/linux/scatterlist.h
86290+++ b/include/linux/scatterlist.h
86291@@ -1,6 +1,7 @@
86292 #ifndef _LINUX_SCATTERLIST_H
86293 #define _LINUX_SCATTERLIST_H
86294
86295+#include <linux/sched.h>
86296 #include <linux/string.h>
86297 #include <linux/bug.h>
86298 #include <linux/mm.h>
86299@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
86300 #ifdef CONFIG_DEBUG_SG
86301 BUG_ON(!virt_addr_valid(buf));
86302 #endif
86303+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86304+ if (object_starts_on_stack(buf)) {
86305+ void *adjbuf = buf - current->stack + current->lowmem_stack;
86306+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
86307+ } else
86308+#endif
86309 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
86310 }
86311
86312diff --git a/include/linux/sched.h b/include/linux/sched.h
86313index 51348f7..8c8b0ba 100644
86314--- a/include/linux/sched.h
86315+++ b/include/linux/sched.h
86316@@ -133,6 +133,7 @@ struct fs_struct;
86317 struct perf_event_context;
86318 struct blk_plug;
86319 struct filename;
86320+struct linux_binprm;
86321
86322 #define VMACACHE_BITS 2
86323 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
86324@@ -420,7 +421,7 @@ extern char __sched_text_start[], __sched_text_end[];
86325 extern int in_sched_functions(unsigned long addr);
86326
86327 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
86328-extern signed long schedule_timeout(signed long timeout);
86329+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
86330 extern signed long schedule_timeout_interruptible(signed long timeout);
86331 extern signed long schedule_timeout_killable(signed long timeout);
86332 extern signed long schedule_timeout_uninterruptible(signed long timeout);
86333@@ -438,6 +439,19 @@ struct nsproxy;
86334 struct user_namespace;
86335
86336 #ifdef CONFIG_MMU
86337+
86338+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
86339+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
86340+#else
86341+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
86342+{
86343+ return 0;
86344+}
86345+#endif
86346+
86347+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
86348+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
86349+
86350 extern void arch_pick_mmap_layout(struct mm_struct *mm);
86351 extern unsigned long
86352 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
86353@@ -736,6 +750,17 @@ struct signal_struct {
86354 #ifdef CONFIG_TASKSTATS
86355 struct taskstats *stats;
86356 #endif
86357+
86358+#ifdef CONFIG_GRKERNSEC
86359+ u32 curr_ip;
86360+ u32 saved_ip;
86361+ u32 gr_saddr;
86362+ u32 gr_daddr;
86363+ u16 gr_sport;
86364+ u16 gr_dport;
86365+ u8 used_accept:1;
86366+#endif
86367+
86368 #ifdef CONFIG_AUDIT
86369 unsigned audit_tty;
86370 unsigned audit_tty_log_passwd;
86371@@ -762,7 +787,7 @@ struct signal_struct {
86372 struct mutex cred_guard_mutex; /* guard against foreign influences on
86373 * credential calculations
86374 * (notably. ptrace) */
86375-};
86376+} __randomize_layout;
86377
86378 /*
86379 * Bits in flags field of signal_struct.
86380@@ -815,6 +840,14 @@ struct user_struct {
86381 struct key *session_keyring; /* UID's default session keyring */
86382 #endif
86383
86384+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86385+ unsigned char kernel_banned;
86386+#endif
86387+#ifdef CONFIG_GRKERNSEC_BRUTE
86388+ unsigned char suid_banned;
86389+ unsigned long suid_ban_expires;
86390+#endif
86391+
86392 /* Hash table maintenance information */
86393 struct hlist_node uidhash_node;
86394 kuid_t uid;
86395@@ -822,7 +855,7 @@ struct user_struct {
86396 #ifdef CONFIG_PERF_EVENTS
86397 atomic_long_t locked_vm;
86398 #endif
86399-};
86400+} __randomize_layout;
86401
86402 extern int uids_sysfs_init(void);
86403
86404@@ -1286,6 +1319,9 @@ enum perf_event_task_context {
86405 struct task_struct {
86406 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
86407 void *stack;
86408+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86409+ void *lowmem_stack;
86410+#endif
86411 atomic_t usage;
86412 unsigned int flags; /* per process flags, defined below */
86413 unsigned int ptrace;
86414@@ -1419,8 +1455,8 @@ struct task_struct {
86415 struct list_head thread_node;
86416
86417 struct completion *vfork_done; /* for vfork() */
86418- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
86419- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86420+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
86421+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
86422
86423 cputime_t utime, stime, utimescaled, stimescaled;
86424 cputime_t gtime;
86425@@ -1445,11 +1481,6 @@ struct task_struct {
86426 struct task_cputime cputime_expires;
86427 struct list_head cpu_timers[3];
86428
86429-/* process credentials */
86430- const struct cred __rcu *real_cred; /* objective and real subjective task
86431- * credentials (COW) */
86432- const struct cred __rcu *cred; /* effective (overridable) subjective task
86433- * credentials (COW) */
86434 char comm[TASK_COMM_LEN]; /* executable name excluding path
86435 - access with [gs]et_task_comm (which lock
86436 it with task_lock())
86437@@ -1467,6 +1498,10 @@ struct task_struct {
86438 #endif
86439 /* CPU-specific state of this task */
86440 struct thread_struct thread;
86441+/* thread_info moved to task_struct */
86442+#ifdef CONFIG_X86
86443+ struct thread_info tinfo;
86444+#endif
86445 /* filesystem information */
86446 struct fs_struct *fs;
86447 /* open file information */
86448@@ -1541,6 +1576,10 @@ struct task_struct {
86449 gfp_t lockdep_reclaim_gfp;
86450 #endif
86451
86452+/* process credentials */
86453+ const struct cred __rcu *real_cred; /* objective and real subjective task
86454+ * credentials (COW) */
86455+
86456 /* journalling filesystem info */
86457 void *journal_info;
86458
86459@@ -1579,6 +1618,10 @@ struct task_struct {
86460 /* cg_list protected by css_set_lock and tsk->alloc_lock */
86461 struct list_head cg_list;
86462 #endif
86463+
86464+ const struct cred __rcu *cred; /* effective (overridable) subjective task
86465+ * credentials (COW) */
86466+
86467 #ifdef CONFIG_FUTEX
86468 struct robust_list_head __user *robust_list;
86469 #ifdef CONFIG_COMPAT
86470@@ -1690,7 +1733,7 @@ struct task_struct {
86471 * Number of functions that haven't been traced
86472 * because of depth overrun.
86473 */
86474- atomic_t trace_overrun;
86475+ atomic_unchecked_t trace_overrun;
86476 /* Pause for the tracing */
86477 atomic_t tracing_graph_pause;
86478 #endif
86479@@ -1718,7 +1761,78 @@ struct task_struct {
86480 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
86481 unsigned long task_state_change;
86482 #endif
86483-};
86484+
86485+#ifdef CONFIG_GRKERNSEC
86486+ /* grsecurity */
86487+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86488+ u64 exec_id;
86489+#endif
86490+#ifdef CONFIG_GRKERNSEC_SETXID
86491+ const struct cred *delayed_cred;
86492+#endif
86493+ struct dentry *gr_chroot_dentry;
86494+ struct acl_subject_label *acl;
86495+ struct acl_subject_label *tmpacl;
86496+ struct acl_role_label *role;
86497+ struct file *exec_file;
86498+ unsigned long brute_expires;
86499+ u16 acl_role_id;
86500+ u8 inherited;
86501+ /* is this the task that authenticated to the special role */
86502+ u8 acl_sp_role;
86503+ u8 is_writable;
86504+ u8 brute;
86505+ u8 gr_is_chrooted;
86506+#endif
86507+
86508+} __randomize_layout;
86509+
86510+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
86511+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
86512+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
86513+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
86514+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
86515+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
86516+
86517+#ifdef CONFIG_PAX_SOFTMODE
86518+extern int pax_softmode;
86519+#endif
86520+
86521+extern int pax_check_flags(unsigned long *);
86522+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
86523+
86524+/* if tsk != current then task_lock must be held on it */
86525+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
86526+static inline unsigned long pax_get_flags(struct task_struct *tsk)
86527+{
86528+ if (likely(tsk->mm))
86529+ return tsk->mm->pax_flags;
86530+ else
86531+ return 0UL;
86532+}
86533+
86534+/* if tsk != current then task_lock must be held on it */
86535+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
86536+{
86537+ if (likely(tsk->mm)) {
86538+ tsk->mm->pax_flags = flags;
86539+ return 0;
86540+ }
86541+ return -EINVAL;
86542+}
86543+#endif
86544+
86545+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
86546+extern void pax_set_initial_flags(struct linux_binprm *bprm);
86547+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
86548+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
86549+#endif
86550+
86551+struct path;
86552+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
86553+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
86554+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
86555+extern void pax_report_refcount_overflow(struct pt_regs *regs);
86556
86557 /* Future-safe accessor for struct task_struct's cpus_allowed. */
86558 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
86559@@ -1801,7 +1915,7 @@ struct pid_namespace;
86560 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
86561 struct pid_namespace *ns);
86562
86563-static inline pid_t task_pid_nr(struct task_struct *tsk)
86564+static inline pid_t task_pid_nr(const struct task_struct *tsk)
86565 {
86566 return tsk->pid;
86567 }
86568@@ -2169,6 +2283,25 @@ extern u64 sched_clock_cpu(int cpu);
86569
86570 extern void sched_clock_init(void);
86571
86572+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86573+static inline void populate_stack(void)
86574+{
86575+ struct task_struct *curtask = current;
86576+ int c;
86577+ int *ptr = curtask->stack;
86578+ int *end = curtask->stack + THREAD_SIZE;
86579+
86580+ while (ptr < end) {
86581+ c = *(volatile int *)ptr;
86582+ ptr += PAGE_SIZE/sizeof(int);
86583+ }
86584+}
86585+#else
86586+static inline void populate_stack(void)
86587+{
86588+}
86589+#endif
86590+
86591 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
86592 static inline void sched_clock_tick(void)
86593 {
86594@@ -2302,7 +2435,9 @@ void yield(void);
86595 extern struct exec_domain default_exec_domain;
86596
86597 union thread_union {
86598+#ifndef CONFIG_X86
86599 struct thread_info thread_info;
86600+#endif
86601 unsigned long stack[THREAD_SIZE/sizeof(long)];
86602 };
86603
86604@@ -2335,6 +2470,7 @@ extern struct pid_namespace init_pid_ns;
86605 */
86606
86607 extern struct task_struct *find_task_by_vpid(pid_t nr);
86608+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
86609 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
86610 struct pid_namespace *ns);
86611
86612@@ -2499,7 +2635,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
86613 extern void exit_itimers(struct signal_struct *);
86614 extern void flush_itimer_signals(void);
86615
86616-extern void do_group_exit(int);
86617+extern __noreturn void do_group_exit(int);
86618
86619 extern int do_execve(struct filename *,
86620 const char __user * const __user *,
86621@@ -2720,9 +2856,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
86622 #define task_stack_end_corrupted(task) \
86623 (*(end_of_stack(task)) != STACK_END_MAGIC)
86624
86625-static inline int object_is_on_stack(void *obj)
86626+static inline int object_starts_on_stack(const void *obj)
86627 {
86628- void *stack = task_stack_page(current);
86629+ const void *stack = task_stack_page(current);
86630
86631 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
86632 }
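
populate_stack() in the sched.h hunk above pre-faults the whole task stack by reading one int per page, so later accesses through the alternate mapping never fault at an awkward moment. Userspace analogue of the page-stride loop:

#include <stdio.h>

#define PAGE_SIZE_DEMO  4096
#define STACK_SIZE_DEMO (4 * PAGE_SIZE_DEMO) /* stands in for THREAD_SIZE */

static char stack_demo[STACK_SIZE_DEMO];

int main(void)
{
        volatile int *ptr = (volatile int *)stack_demo;
        int *end = (int *)(stack_demo + STACK_SIZE_DEMO);
        int touched = 0;

        while (ptr < end) {
                (void)*ptr;                          /* one read per page  */
                ptr += PAGE_SIZE_DEMO / sizeof(int); /* page-sized stride  */
                touched++;
        }
        printf("pages touched: %d\n", touched); /* 4 */
        return 0;
}
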
86633diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
86634index 596a0e0..bea77ec 100644
86635--- a/include/linux/sched/sysctl.h
86636+++ b/include/linux/sched/sysctl.h
86637@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
86638 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
86639
86640 extern int sysctl_max_map_count;
86641+extern unsigned long sysctl_heap_stack_gap;
86642
86643 extern unsigned int sysctl_sched_latency;
86644 extern unsigned int sysctl_sched_min_granularity;
86645diff --git a/include/linux/security.h b/include/linux/security.h
86646index a1b7dbd..036f47f 100644
86647--- a/include/linux/security.h
86648+++ b/include/linux/security.h
86649@@ -27,6 +27,7 @@
86650 #include <linux/slab.h>
86651 #include <linux/err.h>
86652 #include <linux/string.h>
86653+#include <linux/grsecurity.h>
86654
86655 struct linux_binprm;
86656 struct cred;
86657@@ -116,8 +117,6 @@ struct seq_file;
86658
86659 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
86660
86661-void reset_security_ops(void);
86662-
86663 #ifdef CONFIG_MMU
86664 extern unsigned long mmap_min_addr;
86665 extern unsigned long dac_mmap_min_addr;
86666@@ -1756,7 +1755,7 @@ struct security_operations {
86667 struct audit_context *actx);
86668 void (*audit_rule_free) (void *lsmrule);
86669 #endif /* CONFIG_AUDIT */
86670-};
86671+} __randomize_layout;
86672
86673 /* prototypes */
86674 extern int security_init(void);
86675diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
86676index dc368b8..e895209 100644
86677--- a/include/linux/semaphore.h
86678+++ b/include/linux/semaphore.h
86679@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
86680 }
86681
86682 extern void down(struct semaphore *sem);
86683-extern int __must_check down_interruptible(struct semaphore *sem);
86684+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
86685 extern int __must_check down_killable(struct semaphore *sem);
86686 extern int __must_check down_trylock(struct semaphore *sem);
86687 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
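
The __intentional_overflow(-1) annotation seen here (and on many declarations below) is consumed by PaX's size_overflow gcc plugin: -1 marks the return value, positive arguments mark parameters, as places where wraparound or a negative result is deliberate — down_interruptible() legitimately returns -EINTR — so the plugin's overflow instrumentation stands down. A plausible shape for the macro, assuming the usual plugin-guarded definition in the grsecurity compiler headers (the exact spelling is not shown in this hunk):

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)	/* expands to nothing without the plugin */
#endif
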
86688diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
86689index afbb1fd..e1d205d 100644
86690--- a/include/linux/seq_file.h
86691+++ b/include/linux/seq_file.h
86692@@ -27,6 +27,9 @@ struct seq_file {
86693 struct mutex lock;
86694 const struct seq_operations *op;
86695 int poll_event;
86696+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
86697+ u64 exec_id;
86698+#endif
86699 #ifdef CONFIG_USER_NS
86700 struct user_namespace *user_ns;
86701 #endif
86702@@ -39,6 +42,7 @@ struct seq_operations {
86703 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
86704 int (*show) (struct seq_file *m, void *v);
86705 };
86706+typedef struct seq_operations __no_const seq_operations_no_const;
86707
86708 #define SEQ_SKIP 1
86709
86710@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
86711
86712 char *mangle_path(char *s, const char *p, const char *esc);
86713 int seq_open(struct file *, const struct seq_operations *);
86714+int seq_open_restrict(struct file *, const struct seq_operations *);
86715 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
86716 loff_t seq_lseek(struct file *, loff_t, int);
86717 int seq_release(struct inode *, struct file *);
86718@@ -128,6 +133,7 @@ int seq_path_root(struct seq_file *m, const struct path *path,
86719 const struct path *root, const char *esc);
86720
86721 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
86722+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
86723 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
86724 int single_release(struct inode *, struct file *);
86725 void *__seq_open_private(struct file *, const struct seq_operations *, int);
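
seq_open_restrict() and single_open_restrict(), declared above, mirror seq_open()/single_open() but cooperate with the exec_id field added to struct seq_file: under GRKERNSEC_PROC_MEMMAP the opener's exec context is recorded so sensitive /proc output is not replayed across an exec boundary. A sketch of a show routine wired through the restricted variant, assuming the calling convention matches single_open() (foo_show and foo_open are illustrative names):

static int foo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "value: %d\n", 42);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	/* same arguments as single_open(), plus the exec_id bookkeeping */
	return single_open_restrict(file, foo_show, NULL);
}
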
86726diff --git a/include/linux/shm.h b/include/linux/shm.h
86727index 6fb8016..ab4465e 100644
86728--- a/include/linux/shm.h
86729+++ b/include/linux/shm.h
86730@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
86731 /* The task created the shm object. NULL if the task is dead. */
86732 struct task_struct *shm_creator;
86733 struct list_head shm_clist; /* list by creator */
86734+#ifdef CONFIG_GRKERNSEC
86735+ u64 shm_createtime;
86736+ pid_t shm_lapid;
86737+#endif
86738 };
86739
86740 /* shm_mode upper byte flags */
86741diff --git a/include/linux/signal.h b/include/linux/signal.h
86742index ab1e039..ad4229e 100644
86743--- a/include/linux/signal.h
86744+++ b/include/linux/signal.h
86745@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
86746 * know it'll be handled, so that they don't get converted to
86747 * SIGKILL or just silently dropped.
86748 */
86749- kernel_sigaction(sig, (__force __sighandler_t)2);
86750+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
86751 }
86752
86753 static inline void disallow_signal(int sig)
86754diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
86755index bdccc4b..e9f8670 100644
86756--- a/include/linux/skbuff.h
86757+++ b/include/linux/skbuff.h
86758@@ -771,7 +771,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
86759 int node);
86760 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
86761 struct sk_buff *build_skb(void *data, unsigned int frag_size);
86762-static inline struct sk_buff *alloc_skb(unsigned int size,
86763+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
86764 gfp_t priority)
86765 {
86766 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
86767@@ -1967,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
86768 return skb->inner_transport_header - skb->inner_network_header;
86769 }
86770
86771-static inline int skb_network_offset(const struct sk_buff *skb)
86772+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
86773 {
86774 return skb_network_header(skb) - skb->data;
86775 }
86776@@ -2027,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
86777 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
86778 */
86779 #ifndef NET_SKB_PAD
86780-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
86781+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
86782 #endif
86783
86784 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
86785@@ -2669,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
86786 int *err);
86787 unsigned int datagram_poll(struct file *file, struct socket *sock,
86788 struct poll_table_struct *wait);
86789-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
86790+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
86791 struct iov_iter *to, int size);
86792-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
86793+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
86794 struct msghdr *msg, int size)
86795 {
86796 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
86797@@ -3193,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
86798 nf_bridge_put(skb->nf_bridge);
86799 skb->nf_bridge = NULL;
86800 #endif
86801+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
86802+ skb->nf_trace = 0;
86803+#endif
86804 }
86805
86806 static inline void nf_reset_trace(struct sk_buff *skb)
86807diff --git a/include/linux/slab.h b/include/linux/slab.h
86808index 76f1fee..d95e6d2 100644
86809--- a/include/linux/slab.h
86810+++ b/include/linux/slab.h
86811@@ -14,15 +14,29 @@
86812 #include <linux/gfp.h>
86813 #include <linux/types.h>
86814 #include <linux/workqueue.h>
86815-
86816+#include <linux/err.h>
86817
86818 /*
86819 * Flags to pass to kmem_cache_create().
86820 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
86821 */
86822 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
86823+
86824+#ifdef CONFIG_PAX_USERCOPY_SLABS
86825+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
86826+#else
86827+#define SLAB_USERCOPY 0x00000000UL
86828+#endif
86829+
86830 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
86831 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
86832+
86833+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86834+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
86835+#else
86836+#define SLAB_NO_SANITIZE 0x00000000UL
86837+#endif
86838+
86839 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
86840 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
86841 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
86842@@ -98,10 +112,13 @@
86843 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
86844 * Both make kfree a no-op.
86845 */
86846-#define ZERO_SIZE_PTR ((void *)16)
86847+#define ZERO_SIZE_PTR \
86848+({ \
86849+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
86850+ (void *)(-MAX_ERRNO-1L); \
86851+})
86852
86853-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
86854- (unsigned long)ZERO_SIZE_PTR)
86855+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
86856
86857 #include <linux/kmemleak.h>
86858 #include <linux/kasan.h>
86859@@ -143,6 +160,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
86860 void kfree(const void *);
86861 void kzfree(const void *);
86862 size_t ksize(const void *);
86863+const char *check_heap_object(const void *ptr, unsigned long n);
86864+bool is_usercopy_object(const void *ptr);
86865
86866 /*
86867 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
86868@@ -235,6 +254,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
86869 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86870 #endif
86871
86872+#ifdef CONFIG_PAX_USERCOPY_SLABS
86873+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
86874+#endif
86875+
86876 /*
86877 * Figure out which kmalloc slab an allocation of a certain size
86878 * belongs to.
86879@@ -243,7 +266,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86880 * 2 = 120 .. 192 bytes
86881 * n = 2^(n-1) .. 2^n -1
86882 */
86883-static __always_inline int kmalloc_index(size_t size)
86884+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
86885 {
86886 if (!size)
86887 return 0;
86888@@ -286,15 +309,15 @@ static __always_inline int kmalloc_index(size_t size)
86889 }
86890 #endif /* !CONFIG_SLOB */
86891
86892-void *__kmalloc(size_t size, gfp_t flags);
86893+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
86894 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
86895 void kmem_cache_free(struct kmem_cache *, void *);
86896
86897 #ifdef CONFIG_NUMA
86898-void *__kmalloc_node(size_t size, gfp_t flags, int node);
86899+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
86900 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
86901 #else
86902-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
86903+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
86904 {
86905 return __kmalloc(size, flags);
86906 }
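
The rewritten ZERO_SIZE_PTR moves the zero-length-allocation sentinel from (void *)16 to (void *)(-MAX_ERRNO-1), i.e. just below the ERR_PTR() range, and the new ZERO_OR_NULL_PTR() exploits unsigned wraparound: subtracting 1 sends NULL to ULONG_MAX, so a single comparison against ZERO_SIZE_PTR - 1 now catches NULL, the sentinel itself, and every ERR_PTR() value in between. A user-space check of the predicate with stand-in constants:

#include <assert.h>
#include <limits.h>

#define MAX_ERRNO 4095UL
#define ZSP (ULONG_MAX - MAX_ERRNO)	/* == (unsigned long)(-MAX_ERRNO - 1) */
#define ZERO_OR_NULL(x) ((unsigned long)(x) - 1 >= ZSP - 1)

int main(void)
{
	assert(ZERO_OR_NULL(0));		/* NULL: 0 - 1 wraps to ULONG_MAX */
	assert(ZERO_OR_NULL(ZSP));		/* the zero-size sentinel itself */
	assert(ZERO_OR_NULL(ULONG_MAX - 11));	/* an ERR_PTR(-12)-style value */
	assert(!ZERO_OR_NULL(0x1000));		/* an ordinary pointer passes */
	return 0;
}

Folding the ERR_PTR range into the same range check is the point of relocating the sentinel; the BUILD_BUG_ON in the macro guards the assumption that MAX_ERRNO reaches into the low bits of a page.
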
86907diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
86908index 33d0490..70a6313 100644
86909--- a/include/linux/slab_def.h
86910+++ b/include/linux/slab_def.h
86911@@ -40,7 +40,7 @@ struct kmem_cache {
86912 /* 4) cache creation/removal */
86913 const char *name;
86914 struct list_head list;
86915- int refcount;
86916+ atomic_t refcount;
86917 int object_size;
86918 int align;
86919
86920@@ -56,10 +56,14 @@ struct kmem_cache {
86921 unsigned long node_allocs;
86922 unsigned long node_frees;
86923 unsigned long node_overflow;
86924- atomic_t allochit;
86925- atomic_t allocmiss;
86926- atomic_t freehit;
86927- atomic_t freemiss;
86928+ atomic_unchecked_t allochit;
86929+ atomic_unchecked_t allocmiss;
86930+ atomic_unchecked_t freehit;
86931+ atomic_unchecked_t freemiss;
86932+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86933+ atomic_unchecked_t sanitized;
86934+ atomic_unchecked_t not_sanitized;
86935+#endif
86936
86937 /*
86938 * If debugging is enabled, then the allocator can add additional
86939diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
86940index 3388511..6252f90 100644
86941--- a/include/linux/slub_def.h
86942+++ b/include/linux/slub_def.h
86943@@ -74,7 +74,7 @@ struct kmem_cache {
86944 struct kmem_cache_order_objects max;
86945 struct kmem_cache_order_objects min;
86946 gfp_t allocflags; /* gfp flags to use on each alloc */
86947- int refcount; /* Refcount for slab cache destroy */
86948+ atomic_t refcount; /* Refcount for slab cache destroy */
86949 void (*ctor)(void *);
86950 int inuse; /* Offset to metadata */
86951 int align; /* Alignment */
86952diff --git a/include/linux/smp.h b/include/linux/smp.h
86953index be91db2..3f23232 100644
86954--- a/include/linux/smp.h
86955+++ b/include/linux/smp.h
86956@@ -183,7 +183,9 @@ static inline void smp_init(void) { }
86957 #endif
86958
86959 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
86960+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
86961 #define put_cpu() preempt_enable()
86962+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
86963
86964 /*
86965 * Callback to arch code if there's nosmp or maxcpus=0 on the
86966diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
86967index 46cca4c..3323536 100644
86968--- a/include/linux/sock_diag.h
86969+++ b/include/linux/sock_diag.h
86970@@ -11,7 +11,7 @@ struct sock;
86971 struct sock_diag_handler {
86972 __u8 family;
86973 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
86974-};
86975+} __do_const;
86976
86977 int sock_diag_register(const struct sock_diag_handler *h);
86978 void sock_diag_unregister(const struct sock_diag_handler *h);
86979diff --git a/include/linux/sonet.h b/include/linux/sonet.h
86980index 680f9a3..f13aeb0 100644
86981--- a/include/linux/sonet.h
86982+++ b/include/linux/sonet.h
86983@@ -7,7 +7,7 @@
86984 #include <uapi/linux/sonet.h>
86985
86986 struct k_sonet_stats {
86987-#define __HANDLE_ITEM(i) atomic_t i
86988+#define __HANDLE_ITEM(i) atomic_unchecked_t i
86989 __SONET_ITEMS
86990 #undef __HANDLE_ITEM
86991 };
86992diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
86993index 07d8e53..dc934c9 100644
86994--- a/include/linux/sunrpc/addr.h
86995+++ b/include/linux/sunrpc/addr.h
86996@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
86997 {
86998 switch (sap->sa_family) {
86999 case AF_INET:
87000- return ntohs(((struct sockaddr_in *)sap)->sin_port);
87001+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
87002 case AF_INET6:
87003- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
87004+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
87005 }
87006 return 0;
87007 }
87008@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
87009 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
87010 const struct sockaddr *src)
87011 {
87012- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
87013+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
87014 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
87015
87016 dsin->sin_family = ssin->sin_family;
87017@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
87018 if (sa->sa_family != AF_INET6)
87019 return 0;
87020
87021- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
87022+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
87023 }
87024
87025 #endif /* _LINUX_SUNRPC_ADDR_H */
87026diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
87027index 598ba80..d90cba6 100644
87028--- a/include/linux/sunrpc/clnt.h
87029+++ b/include/linux/sunrpc/clnt.h
87030@@ -100,7 +100,7 @@ struct rpc_procinfo {
87031 unsigned int p_timer; /* Which RTT timer to use */
87032 u32 p_statidx; /* Which procedure to account */
87033 const char * p_name; /* name of procedure */
87034-};
87035+} __do_const;
87036
87037 #ifdef __KERNEL__
87038
87039diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
87040index fae6fb9..023fbcd 100644
87041--- a/include/linux/sunrpc/svc.h
87042+++ b/include/linux/sunrpc/svc.h
87043@@ -420,7 +420,7 @@ struct svc_procedure {
87044 unsigned int pc_count; /* call count */
87045 unsigned int pc_cachetype; /* cache info (NFS) */
87046 unsigned int pc_xdrressize; /* maximum size of XDR reply */
87047-};
87048+} __do_const;
87049
87050 /*
87051 * Function prototypes.
87052diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
87053index df8edf8..d140fec 100644
87054--- a/include/linux/sunrpc/svc_rdma.h
87055+++ b/include/linux/sunrpc/svc_rdma.h
87056@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
87057 extern unsigned int svcrdma_max_requests;
87058 extern unsigned int svcrdma_max_req_size;
87059
87060-extern atomic_t rdma_stat_recv;
87061-extern atomic_t rdma_stat_read;
87062-extern atomic_t rdma_stat_write;
87063-extern atomic_t rdma_stat_sq_starve;
87064-extern atomic_t rdma_stat_rq_starve;
87065-extern atomic_t rdma_stat_rq_poll;
87066-extern atomic_t rdma_stat_rq_prod;
87067-extern atomic_t rdma_stat_sq_poll;
87068-extern atomic_t rdma_stat_sq_prod;
87069+extern atomic_unchecked_t rdma_stat_recv;
87070+extern atomic_unchecked_t rdma_stat_read;
87071+extern atomic_unchecked_t rdma_stat_write;
87072+extern atomic_unchecked_t rdma_stat_sq_starve;
87073+extern atomic_unchecked_t rdma_stat_rq_starve;
87074+extern atomic_unchecked_t rdma_stat_rq_poll;
87075+extern atomic_unchecked_t rdma_stat_rq_prod;
87076+extern atomic_unchecked_t rdma_stat_sq_poll;
87077+extern atomic_unchecked_t rdma_stat_sq_prod;
87078
87079 /*
87080 * Contexts are built when an RDMA request is created and are a
87081diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
87082index 8d71d65..f79586e 100644
87083--- a/include/linux/sunrpc/svcauth.h
87084+++ b/include/linux/sunrpc/svcauth.h
87085@@ -120,7 +120,7 @@ struct auth_ops {
87086 int (*release)(struct svc_rqst *rq);
87087 void (*domain_release)(struct auth_domain *);
87088 int (*set_client)(struct svc_rqst *rq);
87089-};
87090+} __do_const;
87091
87092 #define SVC_GARBAGE 1
87093 #define SVC_SYSERR 2
87094diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
87095index e7a018e..49f8b17 100644
87096--- a/include/linux/swiotlb.h
87097+++ b/include/linux/swiotlb.h
87098@@ -60,7 +60,8 @@ extern void
87099
87100 extern void
87101 swiotlb_free_coherent(struct device *hwdev, size_t size,
87102- void *vaddr, dma_addr_t dma_handle);
87103+ void *vaddr, dma_addr_t dma_handle,
87104+ struct dma_attrs *attrs);
87105
87106 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
87107 unsigned long offset, size_t size,
87108diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
87109index 76d1e38..200776e 100644
87110--- a/include/linux/syscalls.h
87111+++ b/include/linux/syscalls.h
87112@@ -102,7 +102,12 @@ union bpf_attr;
87113 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
87114 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
87115 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
87116-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
87117+#define __SC_LONG(t, a) __typeof__( \
87118+ __builtin_choose_expr( \
87119+ sizeof(t) > sizeof(int), \
87120+ (t) 0, \
87121+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
87122+ )) a
87123 #define __SC_CAST(t, a) (t) a
87124 #define __SC_ARGS(t, a) a
87125 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
87126@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
87127 asmlinkage long sys_fsync(unsigned int fd);
87128 asmlinkage long sys_fdatasync(unsigned int fd);
87129 asmlinkage long sys_bdflush(int func, long data);
87130-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
87131- char __user *type, unsigned long flags,
87132+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
87133+ const char __user *type, unsigned long flags,
87134 void __user *data);
87135-asmlinkage long sys_umount(char __user *name, int flags);
87136-asmlinkage long sys_oldumount(char __user *name);
87137+asmlinkage long sys_umount(const char __user *name, int flags);
87138+asmlinkage long sys_oldumount(const char __user *name);
87139 asmlinkage long sys_truncate(const char __user *path, long length);
87140 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
87141 asmlinkage long sys_stat(const char __user *filename,
87142@@ -604,7 +609,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
87143 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
87144 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
87145 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
87146- struct sockaddr __user *, int);
87147+ struct sockaddr __user *, int) __intentional_overflow(0);
87148 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
87149 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
87150 unsigned int vlen, unsigned flags);
87151@@ -663,10 +668,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
87152
87153 asmlinkage long sys_semget(key_t key, int nsems, int semflg);
87154 asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
87155- unsigned nsops);
87156+ long nsops);
87157 asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
87158 asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
87159- unsigned nsops,
87160+ long nsops,
87161 const struct timespec __user *timeout);
87162 asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
87163 asmlinkage long sys_shmget(key_t key, size_t size, int flag);
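
The reworked __SC_LONG picks the register-width type each syscall argument is widened through: a type wider than int keeps its own type, and a narrower one widens to unsigned long or long according to its signedness (via the patch's __type_is_unsigned helper), rather than unconditionally sign-extending through long. A reduced model of the selection logic, with IS_UNSIGNED standing in for __type_is_unsigned:

#include <stdio.h>

#define IS_UNSIGNED(t) ((t)0 - 1 > 0)	/* stand-in for __type_is_unsigned(t) */
#define SC_LONG(t) __typeof__(						\
	__builtin_choose_expr(sizeof(t) > sizeof(int),			\
		(t)0,							\
		__builtin_choose_expr(IS_UNSIGNED(t), 0UL, 0L)))

int main(void)
{
	SC_LONG(unsigned int) a = (unsigned int)-1;	/* zero-extended */
	SC_LONG(int) b = -1;				/* sign-extended */
	printf("%lu %ld\n", (unsigned long)a, (long)b);	/* 4294967295 -1 */
	return 0;
}
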
87164diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
87165index 27b3b0b..e093dd9 100644
87166--- a/include/linux/syscore_ops.h
87167+++ b/include/linux/syscore_ops.h
87168@@ -16,7 +16,7 @@ struct syscore_ops {
87169 int (*suspend)(void);
87170 void (*resume)(void);
87171 void (*shutdown)(void);
87172-};
87173+} __do_const;
87174
87175 extern void register_syscore_ops(struct syscore_ops *ops);
87176 extern void unregister_syscore_ops(struct syscore_ops *ops);
87177diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
87178index b7361f8..341a15a 100644
87179--- a/include/linux/sysctl.h
87180+++ b/include/linux/sysctl.h
87181@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
87182
87183 extern int proc_dostring(struct ctl_table *, int,
87184 void __user *, size_t *, loff_t *);
87185+extern int proc_dostring_modpriv(struct ctl_table *, int,
87186+ void __user *, size_t *, loff_t *);
87187 extern int proc_dointvec(struct ctl_table *, int,
87188 void __user *, size_t *, loff_t *);
87189 extern int proc_dointvec_minmax(struct ctl_table *, int,
87190@@ -113,7 +115,8 @@ struct ctl_table
87191 struct ctl_table_poll *poll;
87192 void *extra1;
87193 void *extra2;
87194-};
87195+} __do_const __randomize_layout;
87196+typedef struct ctl_table __no_const ctl_table_no_const;
87197
87198 struct ctl_node {
87199 struct rb_node node;
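
This hunk shows the constify plugin's two-sided convention in full: __do_const forces struct ctl_table instances into read-only memory (on top of __randomize_layout shuffling the members), while the ctl_table_no_const typedef lets a specific table opt back out when it must be filled in at runtime — the netns_ipvs tables later in this patch use exactly that. An illustrative opted-out table, using field names from the unpatched struct ctl_table:

static ctl_table_no_const ipvs_vars[] = {	/* writable: bypasses constify */
	{ .procname = "expiration" },
	{ }
};

static void set_table_data(ctl_table_no_const *t, void *data)
{
	t->data = data;	/* would be a write to rodata on a __do_const instance */
}
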
87200diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
87201index ddad161..a3efd26 100644
87202--- a/include/linux/sysfs.h
87203+++ b/include/linux/sysfs.h
87204@@ -34,7 +34,8 @@ struct attribute {
87205 struct lock_class_key *key;
87206 struct lock_class_key skey;
87207 #endif
87208-};
87209+} __do_const;
87210+typedef struct attribute __no_const attribute_no_const;
87211
87212 /**
87213 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
87214@@ -63,7 +64,8 @@ struct attribute_group {
87215 struct attribute *, int);
87216 struct attribute **attrs;
87217 struct bin_attribute **bin_attrs;
87218-};
87219+} __do_const;
87220+typedef struct attribute_group __no_const attribute_group_no_const;
87221
87222 /**
87223 * Use these macros to make defining attributes easier. See include/linux/device.h
87224@@ -137,7 +139,8 @@ struct bin_attribute {
87225 char *, loff_t, size_t);
87226 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
87227 struct vm_area_struct *vma);
87228-};
87229+} __do_const;
87230+typedef struct bin_attribute __no_const bin_attribute_no_const;
87231
87232 /**
87233 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
87234diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
87235index 387fa7d..3fcde6b 100644
87236--- a/include/linux/sysrq.h
87237+++ b/include/linux/sysrq.h
87238@@ -16,6 +16,7 @@
87239
87240 #include <linux/errno.h>
87241 #include <linux/types.h>
87242+#include <linux/compiler.h>
87243
87244 /* Possible values of bitmask for enabling sysrq functions */
87245 /* 0x0001 is reserved for enable everything */
87246@@ -33,7 +34,7 @@ struct sysrq_key_op {
87247 char *help_msg;
87248 char *action_msg;
87249 int enable_mask;
87250-};
87251+} __do_const;
87252
87253 #ifdef CONFIG_MAGIC_SYSRQ
87254
87255diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
87256index ff307b5..f1a4468 100644
87257--- a/include/linux/thread_info.h
87258+++ b/include/linux/thread_info.h
87259@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
87260 #error "no set_restore_sigmask() provided and default one won't work"
87261 #endif
87262
87263+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
87264+
87265+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
87266+{
87267+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
87268+}
87269+
87270 #endif /* __KERNEL__ */
87271
87272 #endif /* _LINUX_THREAD_INFO_H */
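
check_object_size() is the PAX_USERCOPY hook: uaccess paths call it with the kernel-side pointer, the length and the copy direction before moving any bytes, and __builtin_constant_p(n) lets __check_object_size() treat compile-time-constant sizes (which an attacker cannot influence) more cheaply than runtime ones. A sketch of where the call would sit, assuming an arch copy_to_user() wrapper (checked_copy_to_user is an illustrative name):

static inline unsigned long
checked_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);	/* to_user == true */
	return __copy_to_user(to, from, n);	/* underlying arch primitive */
}
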
87273diff --git a/include/linux/tty.h b/include/linux/tty.h
87274index 358a337..8829c1f 100644
87275--- a/include/linux/tty.h
87276+++ b/include/linux/tty.h
87277@@ -225,7 +225,7 @@ struct tty_port {
87278 const struct tty_port_operations *ops; /* Port operations */
87279 spinlock_t lock; /* Lock protecting tty field */
87280 int blocked_open; /* Waiting to open */
87281- int count; /* Usage count */
87282+ atomic_t count; /* Usage count */
87283 wait_queue_head_t open_wait; /* Open waiters */
87284 wait_queue_head_t close_wait; /* Close waiters */
87285 wait_queue_head_t delta_msr_wait; /* Modem status change */
87286@@ -313,7 +313,7 @@ struct tty_struct {
87287 /* If the tty has a pending do_SAK, queue it here - akpm */
87288 struct work_struct SAK_work;
87289 struct tty_port *port;
87290-};
87291+} __randomize_layout;
87292
87293 /* Each of a tty's open files has private_data pointing to tty_file_private */
87294 struct tty_file_private {
87295@@ -572,7 +572,7 @@ extern int tty_port_open(struct tty_port *port,
87296 struct tty_struct *tty, struct file *filp);
87297 static inline int tty_port_users(struct tty_port *port)
87298 {
87299- return port->count + port->blocked_open;
87300+ return atomic_read(&port->count) + port->blocked_open;
87301 }
87302
87303 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
87304diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
87305index 92e337c..f46757b 100644
87306--- a/include/linux/tty_driver.h
87307+++ b/include/linux/tty_driver.h
87308@@ -291,7 +291,7 @@ struct tty_operations {
87309 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
87310 #endif
87311 const struct file_operations *proc_fops;
87312-};
87313+} __do_const __randomize_layout;
87314
87315 struct tty_driver {
87316 int magic; /* magic number for this structure */
87317@@ -325,7 +325,7 @@ struct tty_driver {
87318
87319 const struct tty_operations *ops;
87320 struct list_head tty_drivers;
87321-};
87322+} __randomize_layout;
87323
87324 extern struct list_head tty_drivers;
87325
87326diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
87327index 00c9d68..bc0188b 100644
87328--- a/include/linux/tty_ldisc.h
87329+++ b/include/linux/tty_ldisc.h
87330@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
87331
87332 struct module *owner;
87333
87334- int refcount;
87335+ atomic_t refcount;
87336 };
87337
87338 struct tty_ldisc {
87339diff --git a/include/linux/types.h b/include/linux/types.h
87340index 6747247..fc7ec8b 100644
87341--- a/include/linux/types.h
87342+++ b/include/linux/types.h
87343@@ -174,10 +174,26 @@ typedef struct {
87344 int counter;
87345 } atomic_t;
87346
87347+#ifdef CONFIG_PAX_REFCOUNT
87348+typedef struct {
87349+ int counter;
87350+} atomic_unchecked_t;
87351+#else
87352+typedef atomic_t atomic_unchecked_t;
87353+#endif
87354+
87355 #ifdef CONFIG_64BIT
87356 typedef struct {
87357 long counter;
87358 } atomic64_t;
87359+
87360+#ifdef CONFIG_PAX_REFCOUNT
87361+typedef struct {
87362+ long counter;
87363+} atomic64_unchecked_t;
87364+#else
87365+typedef atomic64_t atomic64_unchecked_t;
87366+#endif
87367 #endif
87368
87369 struct list_head {
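
atomic_unchecked_t, introduced here, is the other half of PAX_REFCOUNT: regular atomic_t operations gain an overflow trap, turning refcount overflows into a controlled kill rather than a later use-after-free, and the unchecked variant opts out for counters where wraparound is harmless — which is why the statistics fields throughout this patch (rdma_stat_*, vm_stat, urbnum, and so on) are converted rather than left checked. The accessors mirror the checked API name-for-name:

static atomic_unchecked_t rx_frames;		/* statistics only: may wrap */

static void count_frame(void)
{
	atomic_inc_unchecked(&rx_frames);	/* no overflow trap */
}

static int frames_seen(void)
{
	return atomic_read_unchecked(&rx_frames);
}
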
87370diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
87371index ecd3319..8a36ded 100644
87372--- a/include/linux/uaccess.h
87373+++ b/include/linux/uaccess.h
87374@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
87375 long ret; \
87376 mm_segment_t old_fs = get_fs(); \
87377 \
87378- set_fs(KERNEL_DS); \
87379 pagefault_disable(); \
87380- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
87381- pagefault_enable(); \
87382+ set_fs(KERNEL_DS); \
87383+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
87384 set_fs(old_fs); \
87385+ pagefault_enable(); \
87386 ret; \
87387 })
87388
87389diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
87390index 2d1f9b6..d7a9fce 100644
87391--- a/include/linux/uidgid.h
87392+++ b/include/linux/uidgid.h
87393@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
87394
87395 #endif /* CONFIG_USER_NS */
87396
87397+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
87398+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
87399+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
87400+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
87401+
87402 #endif /* _LINUX_UIDGID_H */
87403diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
87404index 32c0e83..671eb35 100644
87405--- a/include/linux/uio_driver.h
87406+++ b/include/linux/uio_driver.h
87407@@ -67,7 +67,7 @@ struct uio_device {
87408 struct module *owner;
87409 struct device *dev;
87410 int minor;
87411- atomic_t event;
87412+ atomic_unchecked_t event;
87413 struct fasync_struct *async_queue;
87414 wait_queue_head_t wait;
87415 struct uio_info *info;
87416diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
87417index 99c1b4d..562e6f3 100644
87418--- a/include/linux/unaligned/access_ok.h
87419+++ b/include/linux/unaligned/access_ok.h
87420@@ -4,34 +4,34 @@
87421 #include <linux/kernel.h>
87422 #include <asm/byteorder.h>
87423
87424-static inline u16 get_unaligned_le16(const void *p)
87425+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
87426 {
87427- return le16_to_cpup((__le16 *)p);
87428+ return le16_to_cpup((const __le16 *)p);
87429 }
87430
87431-static inline u32 get_unaligned_le32(const void *p)
87432+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
87433 {
87434- return le32_to_cpup((__le32 *)p);
87435+ return le32_to_cpup((const __le32 *)p);
87436 }
87437
87438-static inline u64 get_unaligned_le64(const void *p)
87439+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
87440 {
87441- return le64_to_cpup((__le64 *)p);
87442+ return le64_to_cpup((const __le64 *)p);
87443 }
87444
87445-static inline u16 get_unaligned_be16(const void *p)
87446+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
87447 {
87448- return be16_to_cpup((__be16 *)p);
87449+ return be16_to_cpup((const __be16 *)p);
87450 }
87451
87452-static inline u32 get_unaligned_be32(const void *p)
87453+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
87454 {
87455- return be32_to_cpup((__be32 *)p);
87456+ return be32_to_cpup((const __be32 *)p);
87457 }
87458
87459-static inline u64 get_unaligned_be64(const void *p)
87460+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
87461 {
87462- return be64_to_cpup((__be64 *)p);
87463+ return be64_to_cpup((const __be64 *)p);
87464 }
87465
87466 static inline void put_unaligned_le16(u16 val, void *p)
87467diff --git a/include/linux/usb.h b/include/linux/usb.h
87468index 447fe29..9fc875f 100644
87469--- a/include/linux/usb.h
87470+++ b/include/linux/usb.h
87471@@ -592,7 +592,7 @@ struct usb_device {
87472 int maxchild;
87473
87474 u32 quirks;
87475- atomic_t urbnum;
87476+ atomic_unchecked_t urbnum;
87477
87478 unsigned long active_duration;
87479
87480@@ -1676,7 +1676,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
87481
87482 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
87483 __u8 request, __u8 requesttype, __u16 value, __u16 index,
87484- void *data, __u16 size, int timeout);
87485+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
87486 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
87487 void *data, int len, int *actual_length, int timeout);
87488 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
87489diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
87490index 9fd9e48..e2c5f35 100644
87491--- a/include/linux/usb/renesas_usbhs.h
87492+++ b/include/linux/usb/renesas_usbhs.h
87493@@ -39,7 +39,7 @@ enum {
87494 */
87495 struct renesas_usbhs_driver_callback {
87496 int (*notify_hotplug)(struct platform_device *pdev);
87497-};
87498+} __no_const;
87499
87500 /*
87501 * callback functions for platform
87502diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
87503index 8297e5b..0dfae27 100644
87504--- a/include/linux/user_namespace.h
87505+++ b/include/linux/user_namespace.h
87506@@ -39,7 +39,7 @@ struct user_namespace {
87507 struct key *persistent_keyring_register;
87508 struct rw_semaphore persistent_keyring_register_sem;
87509 #endif
87510-};
87511+} __randomize_layout;
87512
87513 extern struct user_namespace init_user_ns;
87514
87515diff --git a/include/linux/utsname.h b/include/linux/utsname.h
87516index 5093f58..c103e58 100644
87517--- a/include/linux/utsname.h
87518+++ b/include/linux/utsname.h
87519@@ -25,7 +25,7 @@ struct uts_namespace {
87520 struct new_utsname name;
87521 struct user_namespace *user_ns;
87522 struct ns_common ns;
87523-};
87524+} __randomize_layout;
87525 extern struct uts_namespace init_uts_ns;
87526
87527 #ifdef CONFIG_UTS_NS
87528diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
87529index 6f8fbcf..4efc177 100644
87530--- a/include/linux/vermagic.h
87531+++ b/include/linux/vermagic.h
87532@@ -25,9 +25,42 @@
87533 #define MODULE_ARCH_VERMAGIC ""
87534 #endif
87535
87536+#ifdef CONFIG_PAX_REFCOUNT
87537+#define MODULE_PAX_REFCOUNT "REFCOUNT "
87538+#else
87539+#define MODULE_PAX_REFCOUNT ""
87540+#endif
87541+
87542+#ifdef CONSTIFY_PLUGIN
87543+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
87544+#else
87545+#define MODULE_CONSTIFY_PLUGIN ""
87546+#endif
87547+
87548+#ifdef STACKLEAK_PLUGIN
87549+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
87550+#else
87551+#define MODULE_STACKLEAK_PLUGIN ""
87552+#endif
87553+
87554+#ifdef RANDSTRUCT_PLUGIN
87555+#include <generated/randomize_layout_hash.h>
87556+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
87557+#else
87558+#define MODULE_RANDSTRUCT_PLUGIN
87559+#endif
87560+
87561+#ifdef CONFIG_GRKERNSEC
87562+#define MODULE_GRSEC "GRSEC "
87563+#else
87564+#define MODULE_GRSEC ""
87565+#endif
87566+
87567 #define VERMAGIC_STRING \
87568 UTS_RELEASE " " \
87569 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
87570 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
87571- MODULE_ARCH_VERMAGIC
87572+ MODULE_ARCH_VERMAGIC \
87573+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
87574+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
87575
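
Appending these fragments to VERMAGIC_STRING makes module compatibility self-enforcing: a module built without the matching plugins or GRKERNSEC produces a different vermagic and is refused at load time by the ordinary vermagic comparison, before any hardened/unhardened ABI mismatch can bite. On a kernel with REFCOUNT, the constify plugin and grsecurity enabled, the string would look roughly like this (illustrative; the release and flag set vary by config):

	4.0.4-grsec SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC
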
87576diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
87577index b483abd..af305ad 100644
87578--- a/include/linux/vga_switcheroo.h
87579+++ b/include/linux/vga_switcheroo.h
87580@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
87581
87582 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
87583
87584-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
87585+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
87586 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
87587-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
87588+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
87589 #else
87590
87591 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
87592@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
87593
87594 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
87595
87596-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87597+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87598 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
87599-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
87600+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
87601
87602 #endif
87603 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
87604diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
87605index 0ec5983..cc61051 100644
87606--- a/include/linux/vmalloc.h
87607+++ b/include/linux/vmalloc.h
87608@@ -18,6 +18,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
87609 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
87610 #define VM_NO_GUARD 0x00000040 /* don't add guard page */
87611 #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
87612+
87613+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
87614+#define VM_KERNEXEC 0x00000100 /* allocate from executable kernel memory range */
87615+#endif
87616+
87617 /* bits [20..32] reserved for arch specific ioremap internals */
87618
87619 /*
87620@@ -86,6 +91,10 @@ extern void *vmap(struct page **pages, unsigned int count,
87621 unsigned long flags, pgprot_t prot);
87622 extern void vunmap(const void *addr);
87623
87624+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
87625+extern void unmap_process_stacks(struct task_struct *task);
87626+#endif
87627+
87628 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
87629 unsigned long uaddr, void *kaddr,
87630 unsigned long size);
87631@@ -150,7 +159,7 @@ extern void free_vm_area(struct vm_struct *area);
87632
87633 /* for /dev/kmem */
87634 extern long vread(char *buf, char *addr, unsigned long count);
87635-extern long vwrite(char *buf, char *addr, unsigned long count);
87636+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
87637
87638 /*
87639 * Internals. Don't use..
87640diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
87641index 82e7db7..f8ce3d0 100644
87642--- a/include/linux/vmstat.h
87643+++ b/include/linux/vmstat.h
87644@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
87645 /*
87646 * Zone based page accounting with per cpu differentials.
87647 */
87648-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87649+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87650
87651 static inline void zone_page_state_add(long x, struct zone *zone,
87652 enum zone_stat_item item)
87653 {
87654- atomic_long_add(x, &zone->vm_stat[item]);
87655- atomic_long_add(x, &vm_stat[item]);
87656+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
87657+ atomic_long_add_unchecked(x, &vm_stat[item]);
87658 }
87659
87660-static inline unsigned long global_page_state(enum zone_stat_item item)
87661+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
87662 {
87663- long x = atomic_long_read(&vm_stat[item]);
87664+ long x = atomic_long_read_unchecked(&vm_stat[item]);
87665 #ifdef CONFIG_SMP
87666 if (x < 0)
87667 x = 0;
87668@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
87669 return x;
87670 }
87671
87672-static inline unsigned long zone_page_state(struct zone *zone,
87673+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
87674 enum zone_stat_item item)
87675 {
87676- long x = atomic_long_read(&zone->vm_stat[item]);
87677+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
87678 #ifdef CONFIG_SMP
87679 if (x < 0)
87680 x = 0;
87681@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
87682 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
87683 enum zone_stat_item item)
87684 {
87685- long x = atomic_long_read(&zone->vm_stat[item]);
87686+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
87687
87688 #ifdef CONFIG_SMP
87689 int cpu;
87690@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
87691
87692 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
87693 {
87694- atomic_long_inc(&zone->vm_stat[item]);
87695- atomic_long_inc(&vm_stat[item]);
87696+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
87697+ atomic_long_inc_unchecked(&vm_stat[item]);
87698 }
87699
87700 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
87701 {
87702- atomic_long_dec(&zone->vm_stat[item]);
87703- atomic_long_dec(&vm_stat[item]);
87704+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
87705+ atomic_long_dec_unchecked(&vm_stat[item]);
87706 }
87707
87708 static inline void __inc_zone_page_state(struct page *page,
87709diff --git a/include/linux/xattr.h b/include/linux/xattr.h
87710index 91b0a68..0e9adf6 100644
87711--- a/include/linux/xattr.h
87712+++ b/include/linux/xattr.h
87713@@ -28,7 +28,7 @@ struct xattr_handler {
87714 size_t size, int handler_flags);
87715 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
87716 size_t size, int flags, int handler_flags);
87717-};
87718+} __do_const;
87719
87720 struct xattr {
87721 const char *name;
87722@@ -37,6 +37,9 @@ struct xattr {
87723 };
87724
87725 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
87726+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
87727+ssize_t pax_getxattr(struct dentry *, void *, size_t);
87728+#endif
87729 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
87730 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
87731 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
87732diff --git a/include/linux/zlib.h b/include/linux/zlib.h
87733index 92dbbd3..13ab0b3 100644
87734--- a/include/linux/zlib.h
87735+++ b/include/linux/zlib.h
87736@@ -31,6 +31,7 @@
87737 #define _ZLIB_H
87738
87739 #include <linux/zconf.h>
87740+#include <linux/compiler.h>
87741
87742 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
87743 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
87744@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
87745
87746 /* basic functions */
87747
87748-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
87749+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
87750 /*
87751 Returns the number of bytes that needs to be allocated for a per-
87752 stream workspace with the specified parameters. A pointer to this
87753diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
87754index 3e4fddf..5ec9104 100644
87755--- a/include/media/v4l2-dev.h
87756+++ b/include/media/v4l2-dev.h
87757@@ -75,7 +75,7 @@ struct v4l2_file_operations {
87758 int (*mmap) (struct file *, struct vm_area_struct *);
87759 int (*open) (struct file *);
87760 int (*release) (struct file *);
87761-};
87762+} __do_const;
87763
87764 /*
87765 * Newer version of video_device, handled by videodev2.c
87766diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
87767index ffb69da..040393e 100644
87768--- a/include/media/v4l2-device.h
87769+++ b/include/media/v4l2-device.h
87770@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
87771 this function returns 0. If the name ends with a digit (e.g. cx18),
87772 then the name will be set to cx18-0 since cx180 looks really odd. */
87773 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
87774- atomic_t *instance);
87775+ atomic_unchecked_t *instance);
87776
87777 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
87778 Since the parent disappears this ensures that v4l2_dev doesn't have an
87779diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
87780index 2a25dec..bf6dd8a 100644
87781--- a/include/net/9p/transport.h
87782+++ b/include/net/9p/transport.h
87783@@ -62,7 +62,7 @@ struct p9_trans_module {
87784 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
87785 int (*zc_request)(struct p9_client *, struct p9_req_t *,
87786 char *, char *, int , int, int, int);
87787-};
87788+} __do_const;
87789
87790 void v9fs_register_trans(struct p9_trans_module *m);
87791 void v9fs_unregister_trans(struct p9_trans_module *m);
87792diff --git a/include/net/af_unix.h b/include/net/af_unix.h
87793index a175ba4..196eb8242 100644
87794--- a/include/net/af_unix.h
87795+++ b/include/net/af_unix.h
87796@@ -36,7 +36,7 @@ struct unix_skb_parms {
87797 u32 secid; /* Security ID */
87798 #endif
87799 u32 consumed;
87800-};
87801+} __randomize_layout;
87802
87803 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
87804 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
87805diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
87806index 2239a37..a83461f 100644
87807--- a/include/net/bluetooth/l2cap.h
87808+++ b/include/net/bluetooth/l2cap.h
87809@@ -609,7 +609,7 @@ struct l2cap_ops {
87810 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
87811 unsigned long hdr_len,
87812 unsigned long len, int nb);
87813-};
87814+} __do_const;
87815
87816 struct l2cap_conn {
87817 struct hci_conn *hcon;
87818diff --git a/include/net/bonding.h b/include/net/bonding.h
87819index fda6fee..dbdf83c 100644
87820--- a/include/net/bonding.h
87821+++ b/include/net/bonding.h
87822@@ -665,7 +665,7 @@ extern struct rtnl_link_ops bond_link_ops;
87823
87824 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
87825 {
87826- atomic_long_inc(&dev->tx_dropped);
87827+ atomic_long_inc_unchecked(&dev->tx_dropped);
87828 dev_kfree_skb_any(skb);
87829 }
87830
87831diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
87832index f2ae33d..c457cf0 100644
87833--- a/include/net/caif/cfctrl.h
87834+++ b/include/net/caif/cfctrl.h
87835@@ -52,7 +52,7 @@ struct cfctrl_rsp {
87836 void (*radioset_rsp)(void);
87837 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
87838 struct cflayer *client_layer);
87839-};
87840+} __no_const;
87841
87842 /* Link Setup Parameters for CAIF-Links. */
87843 struct cfctrl_link_param {
87844@@ -101,8 +101,8 @@ struct cfctrl_request_info {
87845 struct cfctrl {
87846 struct cfsrvl serv;
87847 struct cfctrl_rsp res;
87848- atomic_t req_seq_no;
87849- atomic_t rsp_seq_no;
87850+ atomic_unchecked_t req_seq_no;
87851+ atomic_unchecked_t rsp_seq_no;
87852 struct list_head list;
87853 /* Protects from simultaneous access to first_req list */
87854 spinlock_t info_list_lock;
87855diff --git a/include/net/flow.h b/include/net/flow.h
87856index 8109a15..504466d 100644
87857--- a/include/net/flow.h
87858+++ b/include/net/flow.h
87859@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
87860
87861 void flow_cache_flush(struct net *net);
87862 void flow_cache_flush_deferred(struct net *net);
87863-extern atomic_t flow_cache_genid;
87864+extern atomic_unchecked_t flow_cache_genid;
87865
87866 #endif
87867diff --git a/include/net/genetlink.h b/include/net/genetlink.h
87868index 0574abd..0f16881 100644
87869--- a/include/net/genetlink.h
87870+++ b/include/net/genetlink.h
87871@@ -130,7 +130,7 @@ struct genl_ops {
87872 u8 cmd;
87873 u8 internal_flags;
87874 u8 flags;
87875-};
87876+} __do_const;
87877
87878 int __genl_register_family(struct genl_family *family);
87879
87880diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
87881index 0f712c0..cd762c4 100644
87882--- a/include/net/gro_cells.h
87883+++ b/include/net/gro_cells.h
87884@@ -27,7 +27,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
87885 cell = this_cpu_ptr(gcells->cells);
87886
87887 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
87888- atomic_long_inc(&dev->rx_dropped);
87889+ atomic_long_inc_unchecked(&dev->rx_dropped);
87890 kfree_skb(skb);
87891 return;
87892 }
87893diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
87894index 5976bde..3a81660 100644
87895--- a/include/net/inet_connection_sock.h
87896+++ b/include/net/inet_connection_sock.h
87897@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
87898 int (*bind_conflict)(const struct sock *sk,
87899 const struct inet_bind_bucket *tb, bool relax);
87900 void (*mtu_reduced)(struct sock *sk);
87901-};
87902+} __do_const;
87903
87904 /** inet_connection_sock - INET connection oriented sock
87905 *
87906diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
87907index 80479ab..0c3f647 100644
87908--- a/include/net/inetpeer.h
87909+++ b/include/net/inetpeer.h
87910@@ -47,7 +47,7 @@ struct inet_peer {
87911 */
87912 union {
87913 struct {
87914- atomic_t rid; /* Frag reception counter */
87915+ atomic_unchecked_t rid; /* Frag reception counter */
87916 };
87917 struct rcu_head rcu;
87918 struct inet_peer *gc_next;
87919diff --git a/include/net/ip.h b/include/net/ip.h
87920index 6cc1eaf..14059b0 100644
87921--- a/include/net/ip.h
87922+++ b/include/net/ip.h
87923@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
87924 }
87925 }
87926
87927-u32 ip_idents_reserve(u32 hash, int segs);
87928+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
87929 void __ip_select_ident(struct iphdr *iph, int segs);
87930
87931 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
87932diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
87933index 5bd120e4..03fb812 100644
87934--- a/include/net/ip_fib.h
87935+++ b/include/net/ip_fib.h
87936@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
87937
87938 #define FIB_RES_SADDR(net, res) \
87939 ((FIB_RES_NH(res).nh_saddr_genid == \
87940- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
87941+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
87942 FIB_RES_NH(res).nh_saddr : \
87943 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
87944 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
87945diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
87946index 615b20b..fd4cbd8 100644
87947--- a/include/net/ip_vs.h
87948+++ b/include/net/ip_vs.h
87949@@ -534,7 +534,7 @@ struct ip_vs_conn {
87950 struct ip_vs_conn *control; /* Master control connection */
87951 atomic_t n_control; /* Number of controlled ones */
87952 struct ip_vs_dest *dest; /* real server */
87953- atomic_t in_pkts; /* incoming packet counter */
87954+ atomic_unchecked_t in_pkts; /* incoming packet counter */
87955
87956 /* Packet transmitter for different forwarding methods. If it
87957 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
87958@@ -682,7 +682,7 @@ struct ip_vs_dest {
87959 __be16 port; /* port number of the server */
87960 union nf_inet_addr addr; /* IP address of the server */
87961 volatile unsigned int flags; /* dest status flags */
87962- atomic_t conn_flags; /* flags to copy to conn */
87963+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
87964 atomic_t weight; /* server weight */
87965
87966 atomic_t refcnt; /* reference counter */
87967@@ -928,11 +928,11 @@ struct netns_ipvs {
87968 /* ip_vs_lblc */
87969 int sysctl_lblc_expiration;
87970 struct ctl_table_header *lblc_ctl_header;
87971- struct ctl_table *lblc_ctl_table;
87972+ ctl_table_no_const *lblc_ctl_table;
87973 /* ip_vs_lblcr */
87974 int sysctl_lblcr_expiration;
87975 struct ctl_table_header *lblcr_ctl_header;
87976- struct ctl_table *lblcr_ctl_table;
87977+ ctl_table_no_const *lblcr_ctl_table;
87978 /* ip_vs_est */
87979 struct list_head est_list; /* estimator list */
87980 spinlock_t est_lock;
87981diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
87982index 8d4f588..2e37ad2 100644
87983--- a/include/net/irda/ircomm_tty.h
87984+++ b/include/net/irda/ircomm_tty.h
87985@@ -33,6 +33,7 @@
87986 #include <linux/termios.h>
87987 #include <linux/timer.h>
87988 #include <linux/tty.h> /* struct tty_struct */
87989+#include <asm/local.h>
87990
87991 #include <net/irda/irias_object.h>
87992 #include <net/irda/ircomm_core.h>
87993diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
87994index 714cc9a..ea05f3e 100644
87995--- a/include/net/iucv/af_iucv.h
87996+++ b/include/net/iucv/af_iucv.h
87997@@ -149,7 +149,7 @@ struct iucv_skb_cb {
87998 struct iucv_sock_list {
87999 struct hlist_head head;
88000 rwlock_t lock;
88001- atomic_t autobind_name;
88002+ atomic_unchecked_t autobind_name;
88003 };
88004
88005 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
88006diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
88007index f3be818..bf46196 100644
88008--- a/include/net/llc_c_ac.h
88009+++ b/include/net/llc_c_ac.h
88010@@ -87,7 +87,7 @@
88011 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
88012 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
88013
88014-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88015+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
88016
88017 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
88018 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
88019diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
88020index 3948cf1..83b28c4 100644
88021--- a/include/net/llc_c_ev.h
88022+++ b/include/net/llc_c_ev.h
88023@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
88024 return (struct llc_conn_state_ev *)skb->cb;
88025 }
88026
88027-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88028-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88029+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
88030+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
88031
88032 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
88033 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
88034diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
88035index 48f3f89..0e92c50 100644
88036--- a/include/net/llc_c_st.h
88037+++ b/include/net/llc_c_st.h
88038@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
88039 u8 next_state;
88040 const llc_conn_ev_qfyr_t *ev_qualifiers;
88041 const llc_conn_action_t *ev_actions;
88042-};
88043+} __do_const;
88044
88045 struct llc_conn_state {
88046 u8 current_state;
88047diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
88048index a61b98c..aade1eb 100644
88049--- a/include/net/llc_s_ac.h
88050+++ b/include/net/llc_s_ac.h
88051@@ -23,7 +23,7 @@
88052 #define SAP_ACT_TEST_IND 9
88053
88054 /* All action functions must look like this */
88055-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88056+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
88057
88058 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
88059 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
88060diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
88061index c4359e2..76dbc4a 100644
88062--- a/include/net/llc_s_st.h
88063+++ b/include/net/llc_s_st.h
88064@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
88065 llc_sap_ev_t ev;
88066 u8 next_state;
88067 const llc_sap_action_t *ev_actions;
88068-};
88069+} __do_const;
88070
88071 struct llc_sap_state {
88072 u8 curr_state;
88073diff --git a/include/net/mac80211.h b/include/net/mac80211.h
88074index d52914b..2b13cec 100644
88075--- a/include/net/mac80211.h
88076+++ b/include/net/mac80211.h
88077@@ -4915,7 +4915,7 @@ struct rate_control_ops {
88078 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
88079
88080 u32 (*get_expected_throughput)(void *priv_sta);
88081-};
88082+} __do_const;
88083
88084 static inline int rate_supported(struct ieee80211_sta *sta,
88085 enum ieee80211_band band,
88086diff --git a/include/net/neighbour.h b/include/net/neighbour.h
88087index 76f7084..8f36e39 100644
88088--- a/include/net/neighbour.h
88089+++ b/include/net/neighbour.h
88090@@ -163,7 +163,7 @@ struct neigh_ops {
88091 void (*error_report)(struct neighbour *, struct sk_buff *);
88092 int (*output)(struct neighbour *, struct sk_buff *);
88093 int (*connected_output)(struct neighbour *, struct sk_buff *);
88094-};
88095+} __do_const;
88096
88097 struct pneigh_entry {
88098 struct pneigh_entry *next;
88099@@ -217,7 +217,7 @@ struct neigh_table {
88100 struct neigh_statistics __percpu *stats;
88101 struct neigh_hash_table __rcu *nht;
88102 struct pneigh_entry **phash_buckets;
88103-};
88104+} __randomize_layout;
88105
88106 enum {
88107 NEIGH_ARP_TABLE = 0,
88108diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
88109index 36faf49..6927638 100644
88110--- a/include/net/net_namespace.h
88111+++ b/include/net/net_namespace.h
88112@@ -131,8 +131,8 @@ struct net {
88113 struct netns_ipvs *ipvs;
88114 #endif
88115 struct sock *diag_nlsk;
88116- atomic_t fnhe_genid;
88117-};
88118+ atomic_unchecked_t fnhe_genid;
88119+} __randomize_layout;
88120
88121 #include <linux/seq_file_net.h>
88122
88123@@ -288,7 +288,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
88124 #define __net_init __init
88125 #define __net_exit __exit_refok
88126 #define __net_initdata __initdata
88127+#ifdef CONSTIFY_PLUGIN
88128 #define __net_initconst __initconst
88129+#else
88130+#define __net_initconst __initdata
88131+#endif
88132 #endif
88133
88134 int peernet2id(struct net *net, struct net *peer);
88135@@ -301,7 +305,7 @@ struct pernet_operations {
88136 void (*exit_batch)(struct list_head *net_exit_list);
88137 int *id;
88138 size_t size;
88139-};
88140+} __do_const;
88141
88142 /*
88143 * Use these carefully. If you implement a network device and it
88144@@ -349,12 +353,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
88145
88146 static inline int rt_genid_ipv4(struct net *net)
88147 {
88148- return atomic_read(&net->ipv4.rt_genid);
88149+ return atomic_read_unchecked(&net->ipv4.rt_genid);
88150 }
88151
88152 static inline void rt_genid_bump_ipv4(struct net *net)
88153 {
88154- atomic_inc(&net->ipv4.rt_genid);
88155+ atomic_inc_unchecked(&net->ipv4.rt_genid);
88156 }
88157
88158 extern void (*__fib6_flush_trees)(struct net *net);
88159@@ -381,12 +385,12 @@ static inline void rt_genid_bump_all(struct net *net)
88160
88161 static inline int fnhe_genid(struct net *net)
88162 {
88163- return atomic_read(&net->fnhe_genid);
88164+ return atomic_read_unchecked(&net->fnhe_genid);
88165 }
88166
88167 static inline void fnhe_genid_bump(struct net *net)
88168 {
88169- atomic_inc(&net->fnhe_genid);
88170+ atomic_inc_unchecked(&net->fnhe_genid);
88171 }
88172
88173 #endif /* __NET_NET_NAMESPACE_H */
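
The atomic_t to atomic_unchecked_t conversions in this header are the PaX REFCOUNT pattern: ordinary atomic_t increments gain an overflow check so a leaked reference cannot wrap a refcount into a use-after-free, while counters whose wraparound is harmless by design, like the rt_genid and fnhe_genid generation numbers above (they only need to change, not to count), move to the unchecked variant. A rough userspace model of the split, using compiler builtins instead of the kernel's instrumented asm:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
    int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    if (old == INT_MAX) {                 /* the increment just wrapped */
        fprintf(stderr, "refcount overflow\n");
        abort();                          /* the kernel kills the task instead */
    }
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);  /* wrap is fine */
}

int main(void)
{
    atomic_unchecked_t genid = { INT_MAX };
    atomic_inc_unchecked(&genid);         /* wraps; simply a new generation */
    printf("genid now %d\n", genid.counter);

    atomic_t ref = { INT_MAX };
    atomic_inc(&ref);                     /* aborts: overflow detected */
    return 0;
}
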
88174diff --git a/include/net/netlink.h b/include/net/netlink.h
88175index e010ee8..405b9f4 100644
88176--- a/include/net/netlink.h
88177+++ b/include/net/netlink.h
88178@@ -518,7 +518,7 @@ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
88179 {
88180 if (mark) {
88181 WARN_ON((unsigned char *) mark < skb->data);
88182- skb_trim(skb, (unsigned char *) mark - skb->data);
88183+ skb_trim(skb, (const unsigned char *) mark - skb->data);
88184 }
88185 }
88186
88187diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
88188index 29d6a94..235d3d84 100644
88189--- a/include/net/netns/conntrack.h
88190+++ b/include/net/netns/conntrack.h
88191@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
88192 struct nf_proto_net {
88193 #ifdef CONFIG_SYSCTL
88194 struct ctl_table_header *ctl_table_header;
88195- struct ctl_table *ctl_table;
88196+ ctl_table_no_const *ctl_table;
88197 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
88198 struct ctl_table_header *ctl_compat_header;
88199- struct ctl_table *ctl_compat_table;
88200+ ctl_table_no_const *ctl_compat_table;
88201 #endif
88202 #endif
88203 unsigned int users;
88204@@ -60,7 +60,7 @@ struct nf_ip_net {
88205 struct nf_icmp_net icmpv6;
88206 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
88207 struct ctl_table_header *ctl_table_header;
88208- struct ctl_table *ctl_table;
88209+ ctl_table_no_const *ctl_table;
88210 #endif
88211 };
88212
88213diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
88214index dbe2254..ed0c151 100644
88215--- a/include/net/netns/ipv4.h
88216+++ b/include/net/netns/ipv4.h
88217@@ -87,7 +87,7 @@ struct netns_ipv4 {
88218
88219 struct ping_group_range ping_group_range;
88220
88221- atomic_t dev_addr_genid;
88222+ atomic_unchecked_t dev_addr_genid;
88223
88224 #ifdef CONFIG_SYSCTL
88225 unsigned long *sysctl_local_reserved_ports;
88226@@ -101,6 +101,6 @@ struct netns_ipv4 {
88227 struct fib_rules_ops *mr_rules_ops;
88228 #endif
88229 #endif
88230- atomic_t rt_genid;
88231+ atomic_unchecked_t rt_genid;
88232 };
88233 #endif
88234diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
88235index 69ae41f..4f94868 100644
88236--- a/include/net/netns/ipv6.h
88237+++ b/include/net/netns/ipv6.h
88238@@ -75,8 +75,8 @@ struct netns_ipv6 {
88239 struct fib_rules_ops *mr6_rules_ops;
88240 #endif
88241 #endif
88242- atomic_t dev_addr_genid;
88243- atomic_t fib6_sernum;
88244+ atomic_unchecked_t dev_addr_genid;
88245+ atomic_unchecked_t fib6_sernum;
88246 };
88247
88248 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
88249diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
88250index 730d82a..045f2c4 100644
88251--- a/include/net/netns/xfrm.h
88252+++ b/include/net/netns/xfrm.h
88253@@ -78,7 +78,7 @@ struct netns_xfrm {
88254
88255 /* flow cache part */
88256 struct flow_cache flow_cache_global;
88257- atomic_t flow_cache_genid;
88258+ atomic_unchecked_t flow_cache_genid;
88259 struct list_head flow_cache_gc_list;
88260 spinlock_t flow_cache_gc_lock;
88261 struct work_struct flow_cache_gc_work;
88262diff --git a/include/net/ping.h b/include/net/ping.h
88263index cc16d41..664f40b 100644
88264--- a/include/net/ping.h
88265+++ b/include/net/ping.h
88266@@ -54,7 +54,7 @@ struct ping_iter_state {
88267
88268 extern struct proto ping_prot;
88269 #if IS_ENABLED(CONFIG_IPV6)
88270-extern struct pingv6_ops pingv6_ops;
88271+extern struct pingv6_ops *pingv6_ops;
88272 #endif
88273
88274 struct pingfakehdr {
88275diff --git a/include/net/protocol.h b/include/net/protocol.h
88276index d6fcc1f..ca277058 100644
88277--- a/include/net/protocol.h
88278+++ b/include/net/protocol.h
88279@@ -49,7 +49,7 @@ struct net_protocol {
88280 * socket lookup?
88281 */
88282 icmp_strict_tag_validation:1;
88283-};
88284+} __do_const;
88285
88286 #if IS_ENABLED(CONFIG_IPV6)
88287 struct inet6_protocol {
88288@@ -62,7 +62,7 @@ struct inet6_protocol {
88289 u8 type, u8 code, int offset,
88290 __be32 info);
88291 unsigned int flags; /* INET6_PROTO_xxx */
88292-};
88293+} __do_const;
88294
88295 #define INET6_PROTO_NOPOLICY 0x1
88296 #define INET6_PROTO_FINAL 0x2
88297diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
88298index 6c6d539..af70817 100644
88299--- a/include/net/rtnetlink.h
88300+++ b/include/net/rtnetlink.h
88301@@ -95,7 +95,7 @@ struct rtnl_link_ops {
88302 const struct net_device *dev,
88303 const struct net_device *slave_dev);
88304 struct net *(*get_link_net)(const struct net_device *dev);
88305-};
88306+} __do_const;
88307
88308 int __rtnl_link_register(struct rtnl_link_ops *ops);
88309 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
88310diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
88311index 4a5b9a3..ca27d73 100644
88312--- a/include/net/sctp/checksum.h
88313+++ b/include/net/sctp/checksum.h
88314@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
88315 unsigned int offset)
88316 {
88317 struct sctphdr *sh = sctp_hdr(skb);
88318- __le32 ret, old = sh->checksum;
88319- const struct skb_checksum_ops ops = {
88320+ __le32 ret, old = sh->checksum;
88321+ static const struct skb_checksum_ops ops = {
88322 .update = sctp_csum_update,
88323 .combine = sctp_csum_combine,
88324 };
88325diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
88326index 487ef34..d457f98 100644
88327--- a/include/net/sctp/sm.h
88328+++ b/include/net/sctp/sm.h
88329@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
88330 typedef struct {
88331 sctp_state_fn_t *fn;
88332 const char *name;
88333-} sctp_sm_table_entry_t;
88334+} __do_const sctp_sm_table_entry_t;
88335
88336 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
88337 * currently in use.
88338@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
88339 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
88340
88341 /* Extern declarations for major data structures. */
88342-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88343+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
88344
88345
88346 /* Get the size of a DATA chunk payload. */
88347diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
88348index 2bb2fcf..d17c291 100644
88349--- a/include/net/sctp/structs.h
88350+++ b/include/net/sctp/structs.h
88351@@ -509,7 +509,7 @@ struct sctp_pf {
88352 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
88353 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
88354 struct sctp_af *af;
88355-};
88356+} __do_const;
88357
88358
88359 /* Structure to track chunk fragments that have been acked, but peer
88360diff --git a/include/net/sock.h b/include/net/sock.h
88361index e4079c2..79c5d3a 100644
88362--- a/include/net/sock.h
88363+++ b/include/net/sock.h
88364@@ -362,7 +362,7 @@ struct sock {
88365 unsigned int sk_napi_id;
88366 unsigned int sk_ll_usec;
88367 #endif
88368- atomic_t sk_drops;
88369+ atomic_unchecked_t sk_drops;
88370 int sk_rcvbuf;
88371
88372 struct sk_filter __rcu *sk_filter;
88373@@ -1039,7 +1039,7 @@ struct proto {
88374 void (*destroy_cgroup)(struct mem_cgroup *memcg);
88375 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
88376 #endif
88377-};
88378+} __randomize_layout;
88379
88380 /*
88381 * Bits in struct cg_proto.flags
88382@@ -1212,7 +1212,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
88383 page_counter_uncharge(&prot->memory_allocated, amt);
88384 }
88385
88386-static inline long
88387+static inline long __intentional_overflow(-1)
88388 sk_memory_allocated(const struct sock *sk)
88389 {
88390 struct proto *prot = sk->sk_prot;
88391@@ -1778,7 +1778,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
88392 }
88393
88394 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
88395- struct iov_iter *from, char *to,
88396+ struct iov_iter *from, unsigned char *to,
88397 int copy, int offset)
88398 {
88399 if (skb->ip_summed == CHECKSUM_NONE) {
88400@@ -2025,7 +2025,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
88401 }
88402 }
88403
88404-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88405+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
88406
88407 /**
88408 * sk_page_frag - return an appropriate page_frag
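
__intentional_overflow() is an annotation consumed by the size_overflow gcc plugin: -1 marks the function's return value, a positive N its Nth parameter, as arithmetic that is allowed to wrap, so the plugin can instrument everything else and stay quiet here. The sk_memory_allocated and tcp_skb_cb changes nearby fall in this class, since TCP sequence numbers are modular 32-bit values. A sketch with the attribute stubbed out (the macro below is a stand-in; only the plugin gives it meaning):

#include <stdio.h>

#define __intentional_overflow(...)   /* no-op outside the plugin */

typedef unsigned int u32;

/* Modular comparison of TCP sequence numbers: the subtraction is meant
 * to wrap, which is exactly what the annotation declares. */
static int before(u32 seq1, u32 seq2) __intentional_overflow(-1);
static int before(u32 seq1, u32 seq2)
{
    return (int)(seq1 - seq2) < 0;
}

int main(void)
{
    /* 1: 0xfffffff0 precedes 0x10 across the 32-bit wrap. */
    printf("%d\n", before(0xfffffff0u, 0x00000010u));
    return 0;
}
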
88409diff --git a/include/net/tcp.h b/include/net/tcp.h
88410index 8d6b983..5813205 100644
88411--- a/include/net/tcp.h
88412+++ b/include/net/tcp.h
88413@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
88414 void tcp_xmit_retransmit_queue(struct sock *);
88415 void tcp_simple_retransmit(struct sock *);
88416 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
88417-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88418+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
88419
88420 void tcp_send_probe0(struct sock *);
88421 void tcp_send_partial(struct sock *);
88422@@ -694,8 +694,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
88423 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
88424 */
88425 struct tcp_skb_cb {
88426- __u32 seq; /* Starting sequence number */
88427- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
88428+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
88429+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
88430 union {
88431 /* Note : tcp_tw_isn is used in input path only
88432 * (isn chosen by tcp_timewait_state_process())
88433@@ -720,7 +720,7 @@ struct tcp_skb_cb {
88434
88435 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
88436 /* 1 byte hole */
88437- __u32 ack_seq; /* Sequence number ACK'd */
88438+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
88439 union {
88440 struct inet_skb_parm h4;
88441 #if IS_ENABLED(CONFIG_IPV6)
88442diff --git a/include/net/xfrm.h b/include/net/xfrm.h
88443index dc4865e..152ee4c 100644
88444--- a/include/net/xfrm.h
88445+++ b/include/net/xfrm.h
88446@@ -285,7 +285,6 @@ struct xfrm_dst;
88447 struct xfrm_policy_afinfo {
88448 unsigned short family;
88449 struct dst_ops *dst_ops;
88450- void (*garbage_collect)(struct net *net);
88451 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
88452 const xfrm_address_t *saddr,
88453 const xfrm_address_t *daddr);
88454@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
88455 struct net_device *dev,
88456 const struct flowi *fl);
88457 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
88458-};
88459+} __do_const;
88460
88461 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
88462 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
88463@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
88464 int (*transport_finish)(struct sk_buff *skb,
88465 int async);
88466 void (*local_error)(struct sk_buff *skb, u32 mtu);
88467-};
88468+} __do_const;
88469
88470 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
88471 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
88472@@ -437,7 +436,7 @@ struct xfrm_mode {
88473 struct module *owner;
88474 unsigned int encap;
88475 int flags;
88476-};
88477+} __do_const;
88478
88479 /* Flags for xfrm_mode. */
88480 enum {
88481@@ -534,7 +533,7 @@ struct xfrm_policy {
88482 struct timer_list timer;
88483
88484 struct flow_cache_object flo;
88485- atomic_t genid;
88486+ atomic_unchecked_t genid;
88487 u32 priority;
88488 u32 index;
88489 struct xfrm_mark mark;
88490@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
88491 }
88492
88493 void xfrm_garbage_collect(struct net *net);
88494+void xfrm_garbage_collect_deferred(struct net *net);
88495
88496 #else
88497
88498@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
88499 static inline void xfrm_garbage_collect(struct net *net)
88500 {
88501 }
88502+static inline void xfrm_garbage_collect_deferred(struct net *net)
88503+{
88504+}
88505 #endif
88506
88507 static __inline__
88508diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
88509index 1017e0b..227aa4d 100644
88510--- a/include/rdma/iw_cm.h
88511+++ b/include/rdma/iw_cm.h
88512@@ -122,7 +122,7 @@ struct iw_cm_verbs {
88513 int backlog);
88514
88515 int (*destroy_listen)(struct iw_cm_id *cm_id);
88516-};
88517+} __no_const;
88518
88519 /**
88520 * iw_create_cm_id - Create an IW CM identifier.
88521diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
88522index 93d14da..734b3d8 100644
88523--- a/include/scsi/libfc.h
88524+++ b/include/scsi/libfc.h
88525@@ -771,6 +771,7 @@ struct libfc_function_template {
88526 */
88527 void (*disc_stop_final) (struct fc_lport *);
88528 };
88529+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
88530
88531 /**
88532 * struct fc_disc - Discovery context
88533@@ -875,7 +876,7 @@ struct fc_lport {
88534 struct fc_vport *vport;
88535
88536 /* Operational Information */
88537- struct libfc_function_template tt;
88538+ libfc_function_template_no_const tt;
88539 u8 link_up;
88540 u8 qfull;
88541 enum fc_lport_state state;
88542diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
88543index a4c9336..d6f8f34 100644
88544--- a/include/scsi/scsi_device.h
88545+++ b/include/scsi/scsi_device.h
88546@@ -185,9 +185,9 @@ struct scsi_device {
88547 unsigned int max_device_blocked; /* what device_blocked counts down from */
88548 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
88549
88550- atomic_t iorequest_cnt;
88551- atomic_t iodone_cnt;
88552- atomic_t ioerr_cnt;
88553+ atomic_unchecked_t iorequest_cnt;
88554+ atomic_unchecked_t iodone_cnt;
88555+ atomic_unchecked_t ioerr_cnt;
88556
88557 struct device sdev_gendev,
88558 sdev_dev;
88559diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
88560index 007a0bc..7188db8 100644
88561--- a/include/scsi/scsi_transport_fc.h
88562+++ b/include/scsi/scsi_transport_fc.h
88563@@ -756,7 +756,8 @@ struct fc_function_template {
88564 unsigned long show_host_system_hostname:1;
88565
88566 unsigned long disable_target_scan:1;
88567-};
88568+} __do_const;
88569+typedef struct fc_function_template __no_const fc_function_template_no_const;
88570
88571
88572 /**
88573diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
88574index f48089d..73abe48 100644
88575--- a/include/sound/compress_driver.h
88576+++ b/include/sound/compress_driver.h
88577@@ -130,7 +130,7 @@ struct snd_compr_ops {
88578 struct snd_compr_caps *caps);
88579 int (*get_codec_caps) (struct snd_compr_stream *stream,
88580 struct snd_compr_codec_caps *codec);
88581-};
88582+} __no_const;
88583
88584 /**
88585 * struct snd_compr: Compressed device
88586diff --git a/include/sound/soc.h b/include/sound/soc.h
88587index 0d1ade1..34e77d3 100644
88588--- a/include/sound/soc.h
88589+++ b/include/sound/soc.h
88590@@ -856,7 +856,7 @@ struct snd_soc_codec_driver {
88591 enum snd_soc_dapm_type, int);
88592
88593 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
88594-};
88595+} __do_const;
88596
88597 /* SoC platform interface */
88598 struct snd_soc_platform_driver {
88599@@ -883,7 +883,7 @@ struct snd_soc_platform_driver {
88600 const struct snd_compr_ops *compr_ops;
88601
88602 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
88603-};
88604+} __do_const;
88605
88606 struct snd_soc_dai_link_component {
88607 const char *name;
88608diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
88609index 985ca4c..b55b54a 100644
88610--- a/include/target/target_core_base.h
88611+++ b/include/target/target_core_base.h
88612@@ -767,7 +767,7 @@ struct se_device {
88613 atomic_long_t write_bytes;
88614 /* Active commands on this virtual SE device */
88615 atomic_t simple_cmds;
88616- atomic_t dev_ordered_id;
88617+ atomic_unchecked_t dev_ordered_id;
88618 atomic_t dev_ordered_sync;
88619 atomic_t dev_qf_count;
88620 int export_count;
88621diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
88622new file mode 100644
88623index 0000000..fb634b7
88624--- /dev/null
88625+++ b/include/trace/events/fs.h
88626@@ -0,0 +1,53 @@
88627+#undef TRACE_SYSTEM
88628+#define TRACE_SYSTEM fs
88629+
88630+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
88631+#define _TRACE_FS_H
88632+
88633+#include <linux/fs.h>
88634+#include <linux/tracepoint.h>
88635+
88636+TRACE_EVENT(do_sys_open,
88637+
88638+ TP_PROTO(const char *filename, int flags, int mode),
88639+
88640+ TP_ARGS(filename, flags, mode),
88641+
88642+ TP_STRUCT__entry(
88643+ __string( filename, filename )
88644+ __field( int, flags )
88645+ __field( int, mode )
88646+ ),
88647+
88648+ TP_fast_assign(
88649+ __assign_str(filename, filename);
88650+ __entry->flags = flags;
88651+ __entry->mode = mode;
88652+ ),
88653+
88654+ TP_printk("\"%s\" %x %o",
88655+ __get_str(filename), __entry->flags, __entry->mode)
88656+);
88657+
88658+TRACE_EVENT(open_exec,
88659+
88660+ TP_PROTO(const char *filename),
88661+
88662+ TP_ARGS(filename),
88663+
88664+ TP_STRUCT__entry(
88665+ __string( filename, filename )
88666+ ),
88667+
88668+ TP_fast_assign(
88669+ __assign_str(filename, filename);
88670+ ),
88671+
88672+ TP_printk("\"%s\"",
88673+ __get_str(filename))
88674+);
88675+
88676+#endif /* _TRACE_FS_H */
88677+
88678+/* This part must be outside protection */
88679+#include <trace/define_trace.h>
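
This new header re-adds the do_sys_open/open_exec tracepoints; TRACE_EVENT() expands each definition into a trace_do_sys_open()-style stub that call sites fire and that ftrace renders with the TP_printk format string. As a loose userspace stand-in for what the generated probe emits (assumption: printf in place of the ftrace ring buffer):

#include <stdio.h>

/* Mirrors TP_printk("\"%s\" %x %o", ...) from the do_sys_open event. */
static void trace_do_sys_open(const char *filename, int flags, int mode)
{
    printf("\"%s\" %x %o\n", filename, flags, mode);
}

int main(void)
{
    trace_do_sys_open("/etc/hostname", 0x0, 0644);  /* prints: "/etc/hostname" 0 644 */
    return 0;
}
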
88680diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
88681index 3608beb..df39d8a 100644
88682--- a/include/trace/events/irq.h
88683+++ b/include/trace/events/irq.h
88684@@ -36,7 +36,7 @@ struct softirq_action;
88685 */
88686 TRACE_EVENT(irq_handler_entry,
88687
88688- TP_PROTO(int irq, struct irqaction *action),
88689+ TP_PROTO(int irq, const struct irqaction *action),
88690
88691 TP_ARGS(irq, action),
88692
88693@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
88694 */
88695 TRACE_EVENT(irq_handler_exit,
88696
88697- TP_PROTO(int irq, struct irqaction *action, int ret),
88698+ TP_PROTO(int irq, const struct irqaction *action, int ret),
88699
88700 TP_ARGS(irq, action, ret),
88701
88702diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
88703index 6eed16b..3e05750 100644
88704--- a/include/uapi/drm/i915_drm.h
88705+++ b/include/uapi/drm/i915_drm.h
88706@@ -347,6 +347,7 @@ typedef struct drm_i915_irq_wait {
88707 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
88708 #define I915_PARAM_MMAP_VERSION 30
88709 #define I915_PARAM_HAS_BSD2 31
88710+#define I915_PARAM_HAS_LEGACY_CONTEXT 35
88711
88712 typedef struct drm_i915_getparam {
88713 int param;
88714diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
88715index 7caf44c..23c6f27 100644
88716--- a/include/uapi/linux/a.out.h
88717+++ b/include/uapi/linux/a.out.h
88718@@ -39,6 +39,14 @@ enum machine_type {
88719 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
88720 };
88721
88722+/* Constants for the N_FLAGS field */
88723+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88724+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
88725+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
88726+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
88727+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88728+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88729+
88730 #if !defined (N_MAGIC)
88731 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
88732 #endif
88733diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
88734index 22b6ad3..aeba37e 100644
88735--- a/include/uapi/linux/bcache.h
88736+++ b/include/uapi/linux/bcache.h
88737@@ -5,6 +5,7 @@
88738 * Bcache on disk data structures
88739 */
88740
88741+#include <linux/compiler.h>
88742 #include <asm/types.h>
88743
88744 #define BITMASK(name, type, field, offset, size) \
88745@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
88746 /* Btree keys - all units are in sectors */
88747
88748 struct bkey {
88749- __u64 high;
88750- __u64 low;
88751+ __u64 high __intentional_overflow(-1);
88752+ __u64 low __intentional_overflow(-1);
88753 __u64 ptr[];
88754 };
88755
88756diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
88757index d876736..ccce5c0 100644
88758--- a/include/uapi/linux/byteorder/little_endian.h
88759+++ b/include/uapi/linux/byteorder/little_endian.h
88760@@ -42,51 +42,51 @@
88761
88762 static inline __le64 __cpu_to_le64p(const __u64 *p)
88763 {
88764- return (__force __le64)*p;
88765+ return (__force const __le64)*p;
88766 }
88767-static inline __u64 __le64_to_cpup(const __le64 *p)
88768+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
88769 {
88770- return (__force __u64)*p;
88771+ return (__force const __u64)*p;
88772 }
88773 static inline __le32 __cpu_to_le32p(const __u32 *p)
88774 {
88775- return (__force __le32)*p;
88776+ return (__force const __le32)*p;
88777 }
88778 static inline __u32 __le32_to_cpup(const __le32 *p)
88779 {
88780- return (__force __u32)*p;
88781+ return (__force const __u32)*p;
88782 }
88783 static inline __le16 __cpu_to_le16p(const __u16 *p)
88784 {
88785- return (__force __le16)*p;
88786+ return (__force const __le16)*p;
88787 }
88788 static inline __u16 __le16_to_cpup(const __le16 *p)
88789 {
88790- return (__force __u16)*p;
88791+ return (__force const __u16)*p;
88792 }
88793 static inline __be64 __cpu_to_be64p(const __u64 *p)
88794 {
88795- return (__force __be64)__swab64p(p);
88796+ return (__force const __be64)__swab64p(p);
88797 }
88798 static inline __u64 __be64_to_cpup(const __be64 *p)
88799 {
88800- return __swab64p((__u64 *)p);
88801+ return __swab64p((const __u64 *)p);
88802 }
88803 static inline __be32 __cpu_to_be32p(const __u32 *p)
88804 {
88805- return (__force __be32)__swab32p(p);
88806+ return (__force const __be32)__swab32p(p);
88807 }
88808-static inline __u32 __be32_to_cpup(const __be32 *p)
88809+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
88810 {
88811- return __swab32p((__u32 *)p);
88812+ return __swab32p((const __u32 *)p);
88813 }
88814 static inline __be16 __cpu_to_be16p(const __u16 *p)
88815 {
88816- return (__force __be16)__swab16p(p);
88817+ return (__force const __be16)__swab16p(p);
88818 }
88819 static inline __u16 __be16_to_cpup(const __be16 *p)
88820 {
88821- return __swab16p((__u16 *)p);
88822+ return __swab16p((const __u16 *)p);
88823 }
88824 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
88825 #define __le64_to_cpus(x) do { (void)(x); } while (0)
88826diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
88827index 71e1d0e..6cc9caf 100644
88828--- a/include/uapi/linux/elf.h
88829+++ b/include/uapi/linux/elf.h
88830@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
88831 #define PT_GNU_EH_FRAME 0x6474e550
88832
88833 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
88834+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
88835+
88836+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
88837+
88838+/* Constants for the e_flags field */
88839+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88840+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
88841+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
88842+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
88843+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88844+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88845
88846 /*
88847 * Extended Numbering
88848@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
88849 #define DT_DEBUG 21
88850 #define DT_TEXTREL 22
88851 #define DT_JMPREL 23
88852+#define DT_FLAGS 30
88853+ #define DF_TEXTREL 0x00000004
88854 #define DT_ENCODING 32
88855 #define OLD_DT_LOOS 0x60000000
88856 #define DT_LOOS 0x6000000d
88857@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
88858 #define PF_W 0x2
88859 #define PF_X 0x1
88860
88861+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
88862+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
88863+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
88864+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
88865+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
88866+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
88867+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
88868+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
88869+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
88870+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
88871+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
88872+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
88873+
88874 typedef struct elf32_phdr{
88875 Elf32_Word p_type;
88876 Elf32_Off p_offset;
88877@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
88878 #define EI_OSABI 7
88879 #define EI_PAD 8
88880
88881+#define EI_PAX 14
88882+
88883 #define ELFMAG0 0x7f /* EI_MAG */
88884 #define ELFMAG1 'E'
88885 #define ELFMAG2 'L'
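
The PT_PAX_FLAGS program header and the PF_*/PF_NO* flag pairs added above carry per-binary PaX policy inside the ELF image; userspace tools such as paxctl flip these bits. A small ELF64 reader for that header, reusing only constants defined in this hunk (minimal sketch: 64-bit little-endian host assumed, error handling kept short):

#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
#define PF_PAGEEXEC  (1U << 4)
#define PF_MPROTECT  (1U << 8)

int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s <elf64-file>\n", argv[0]);
        return 1;
    }
    FILE *f = fopen(argv[1], "rb");
    if (!f) { perror("fopen"); return 1; }

    Elf64_Ehdr eh;
    if (fread(&eh, sizeof(eh), 1, f) != 1) { fclose(f); return 1; }

    for (unsigned i = 0; i < eh.e_phnum; i++) {
        Elf64_Phdr ph;
        if (fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET) != 0)
            break;
        if (fread(&ph, sizeof(ph), 1, f) != 1)
            break;
        if (ph.p_type == PT_PAX_FLAGS)
            printf("PT_PAX_FLAGS = %#x (PAGEEXEC=%d MPROTECT=%d)\n",
                   ph.p_flags,
                   !!(ph.p_flags & PF_PAGEEXEC),
                   !!(ph.p_flags & PF_MPROTECT));
    }
    fclose(f);
    return 0;
}

Build it and point it at any PaX-marked binary; unmarked binaries simply print nothing.
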
88886diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
88887index aa169c4..6a2771d 100644
88888--- a/include/uapi/linux/personality.h
88889+++ b/include/uapi/linux/personality.h
88890@@ -30,6 +30,7 @@ enum {
88891 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
88892 ADDR_NO_RANDOMIZE | \
88893 ADDR_COMPAT_LAYOUT | \
88894+ ADDR_LIMIT_3GB | \
88895 MMAP_PAGE_ZERO)
88896
88897 /*
88898diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
88899index 7530e74..e714828 100644
88900--- a/include/uapi/linux/screen_info.h
88901+++ b/include/uapi/linux/screen_info.h
88902@@ -43,7 +43,8 @@ struct screen_info {
88903 __u16 pages; /* 0x32 */
88904 __u16 vesa_attributes; /* 0x34 */
88905 __u32 capabilities; /* 0x36 */
88906- __u8 _reserved[6]; /* 0x3a */
88907+ __u16 vesapm_size; /* 0x3a */
88908+ __u8 _reserved[4]; /* 0x3c */
88909 } __attribute__((packed));
88910
88911 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88912diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
88913index 0e011eb..82681b1 100644
88914--- a/include/uapi/linux/swab.h
88915+++ b/include/uapi/linux/swab.h
88916@@ -43,7 +43,7 @@
88917 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
88918 */
88919
88920-static inline __attribute_const__ __u16 __fswab16(__u16 val)
88921+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
88922 {
88923 #ifdef __HAVE_BUILTIN_BSWAP16__
88924 return __builtin_bswap16(val);
88925@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
88926 #endif
88927 }
88928
88929-static inline __attribute_const__ __u32 __fswab32(__u32 val)
88930+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
88931 {
88932 #ifdef __HAVE_BUILTIN_BSWAP32__
88933 return __builtin_bswap32(val);
88934@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
88935 #endif
88936 }
88937
88938-static inline __attribute_const__ __u64 __fswab64(__u64 val)
88939+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
88940 {
88941 #ifdef __HAVE_BUILTIN_BSWAP64__
88942 return __builtin_bswap64(val);
88943diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
88944index 1590c49..5eab462 100644
88945--- a/include/uapi/linux/xattr.h
88946+++ b/include/uapi/linux/xattr.h
88947@@ -73,5 +73,9 @@
88948 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
88949 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
88950
88951+/* User namespace */
88952+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
88953+#define XATTR_PAX_FLAGS_SUFFIX "flags"
88954+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
88955
88956 #endif /* _UAPI_LINUX_XATTR_H */
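
The "user.pax.flags" name defined here stores the same per-binary PaX marking as an extended attribute, for cases where editing ELF program headers is impractical (for instance, binaries validated by hash); the kernel consults it at exec time when the matching PaX xattr option is enabled. A minimal round-trip with the standard xattr calls (assumption: the "pm" value is an illustrative flag string, not a documented format):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

#define XATTR_NAME_PAX_FLAGS "user.pax.flags"

int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }

    /* Mark the file, then read the marking back. */
    if (setxattr(argv[1], XATTR_NAME_PAX_FLAGS, "pm", strlen("pm"), 0) != 0)
        perror("setxattr");

    char buf[32];
    ssize_t n = getxattr(argv[1], XATTR_NAME_PAX_FLAGS, buf, sizeof(buf) - 1);
    if (n < 0) { perror("getxattr"); return 1; }
    buf[n] = '\0';
    printf("%s = %s\n", XATTR_NAME_PAX_FLAGS, buf);
    return 0;
}
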
88957diff --git a/include/video/udlfb.h b/include/video/udlfb.h
88958index f9466fa..f4e2b81 100644
88959--- a/include/video/udlfb.h
88960+++ b/include/video/udlfb.h
88961@@ -53,10 +53,10 @@ struct dlfb_data {
88962 u32 pseudo_palette[256];
88963 int blank_mode; /*one of FB_BLANK_ */
88964 /* blit-only rendering path metrics, exposed through sysfs */
88965- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88966- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
88967- atomic_t bytes_sent; /* to usb, after compression including overhead */
88968- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
88969+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88970+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
88971+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
88972+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
88973 };
88974
88975 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
88976diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
88977index 30f5362..8ed8ac9 100644
88978--- a/include/video/uvesafb.h
88979+++ b/include/video/uvesafb.h
88980@@ -122,6 +122,7 @@ struct uvesafb_par {
88981 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
88982 u8 pmi_setpal; /* PMI for palette changes */
88983 u16 *pmi_base; /* protected mode interface location */
88984+ u8 *pmi_code; /* protected mode code location */
88985 void *pmi_start;
88986 void *pmi_pal;
88987 u8 *vbe_state_orig; /*
88988diff --git a/init/Kconfig b/init/Kconfig
88989index f5dbc6d..8259396 100644
88990--- a/init/Kconfig
88991+++ b/init/Kconfig
88992@@ -1136,6 +1136,7 @@ endif # CGROUPS
88993
88994 config CHECKPOINT_RESTORE
88995 bool "Checkpoint/restore support" if EXPERT
88996+ depends on !GRKERNSEC
88997 default n
88998 help
88999 Enables additional kernel features in a sake of checkpoint/restore.
89000@@ -1646,7 +1647,7 @@ config SLUB_DEBUG
89001
89002 config COMPAT_BRK
89003 bool "Disable heap randomization"
89004- default y
89005+ default n
89006 help
89007 Randomizing heap placement makes heap exploits harder, but it
89008 also breaks ancient binaries (including anything libc5 based).
89009@@ -1977,7 +1978,7 @@ config INIT_ALL_POSSIBLE
89010 config STOP_MACHINE
89011 bool
89012 default y
89013- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
89014+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
89015 help
89016 Need stop_machine() primitive.
89017
89018diff --git a/init/Makefile b/init/Makefile
89019index 7bc47ee..6da2dc7 100644
89020--- a/init/Makefile
89021+++ b/init/Makefile
89022@@ -2,6 +2,9 @@
89023 # Makefile for the linux kernel.
89024 #
89025
89026+ccflags-y := $(GCC_PLUGINS_CFLAGS)
89027+asflags-y := $(GCC_PLUGINS_AFLAGS)
89028+
89029 obj-y := main.o version.o mounts.o
89030 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
89031 obj-y += noinitramfs.o
89032diff --git a/init/do_mounts.c b/init/do_mounts.c
89033index eb41008..f5dbbf9 100644
89034--- a/init/do_mounts.c
89035+++ b/init/do_mounts.c
89036@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
89037 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89038 {
89039 struct super_block *s;
89040- int err = sys_mount(name, "/root", fs, flags, data);
89041+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
89042 if (err)
89043 return err;
89044
89045- sys_chdir("/root");
89046+ sys_chdir((const char __force_user *)"/root");
89047 s = current->fs->pwd.dentry->d_sb;
89048 ROOT_DEV = s->s_dev;
89049 printk(KERN_INFO
89050@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
89051 va_start(args, fmt);
89052 vsprintf(buf, fmt, args);
89053 va_end(args);
89054- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89055+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89056 if (fd >= 0) {
89057 sys_ioctl(fd, FDEJECT, 0);
89058 sys_close(fd);
89059 }
89060 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89061- fd = sys_open("/dev/console", O_RDWR, 0);
89062+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
89063 if (fd >= 0) {
89064 sys_ioctl(fd, TCGETS, (long)&termios);
89065 termios.c_lflag &= ~ICANON;
89066 sys_ioctl(fd, TCSETSF, (long)&termios);
89067- sys_read(fd, &c, 1);
89068+ sys_read(fd, (char __user *)&c, 1);
89069 termios.c_lflag |= ICANON;
89070 sys_ioctl(fd, TCSETSF, (long)&termios);
89071 sys_close(fd);
89072@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
89073 mount_root();
89074 out:
89075 devtmpfs_mount("dev");
89076- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89077- sys_chroot(".");
89078+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89079+ sys_chroot((const char __force_user *)".");
89080 }
89081
89082 static bool is_tmpfs;
89083diff --git a/init/do_mounts.h b/init/do_mounts.h
89084index f5b978a..69dbfe8 100644
89085--- a/init/do_mounts.h
89086+++ b/init/do_mounts.h
89087@@ -15,15 +15,15 @@ extern int root_mountflags;
89088
89089 static inline int create_dev(char *name, dev_t dev)
89090 {
89091- sys_unlink(name);
89092- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89093+ sys_unlink((char __force_user *)name);
89094+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89095 }
89096
89097 #if BITS_PER_LONG == 32
89098 static inline u32 bstat(char *name)
89099 {
89100 struct stat64 stat;
89101- if (sys_stat64(name, &stat) != 0)
89102+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89103 return 0;
89104 if (!S_ISBLK(stat.st_mode))
89105 return 0;
89106@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89107 static inline u32 bstat(char *name)
89108 {
89109 struct stat stat;
89110- if (sys_newstat(name, &stat) != 0)
89111+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89112 return 0;
89113 if (!S_ISBLK(stat.st_mode))
89114 return 0;
89115diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89116index 3e0878e..8a9d7a0 100644
89117--- a/init/do_mounts_initrd.c
89118+++ b/init/do_mounts_initrd.c
89119@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
89120 {
89121 sys_unshare(CLONE_FS | CLONE_FILES);
89122 /* stdin/stdout/stderr for /linuxrc */
89123- sys_open("/dev/console", O_RDWR, 0);
89124+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
89125 sys_dup(0);
89126 sys_dup(0);
89127 /* move initrd over / and chdir/chroot in initrd root */
89128- sys_chdir("/root");
89129- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89130- sys_chroot(".");
89131+ sys_chdir((const char __force_user *)"/root");
89132+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89133+ sys_chroot((const char __force_user *)".");
89134 sys_setsid();
89135 return 0;
89136 }
89137@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
89138 create_dev("/dev/root.old", Root_RAM0);
89139 /* mount initrd on rootfs' /root */
89140 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89141- sys_mkdir("/old", 0700);
89142- sys_chdir("/old");
89143+ sys_mkdir((const char __force_user *)"/old", 0700);
89144+ sys_chdir((const char __force_user *)"/old");
89145
89146 /* try loading default modules from initrd */
89147 load_default_modules();
89148@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
89149 current->flags &= ~PF_FREEZER_SKIP;
89150
89151 /* move initrd to rootfs' /old */
89152- sys_mount("..", ".", NULL, MS_MOVE, NULL);
89153+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
89154 /* switch root and cwd back to / of rootfs */
89155- sys_chroot("..");
89156+ sys_chroot((const char __force_user *)"..");
89157
89158 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89159- sys_chdir("/old");
89160+ sys_chdir((const char __force_user *)"/old");
89161 return;
89162 }
89163
89164- sys_chdir("/");
89165+ sys_chdir((const char __force_user *)"/");
89166 ROOT_DEV = new_decode_dev(real_root_dev);
89167 mount_root();
89168
89169 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89170- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89171+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89172 if (!error)
89173 printk("okay\n");
89174 else {
89175- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89176+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89177 if (error == -ENOENT)
89178 printk("/initrd does not exist. Ignored.\n");
89179 else
89180 printk("failed\n");
89181 printk(KERN_NOTICE "Unmounting old root\n");
89182- sys_umount("/old", MNT_DETACH);
89183+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89184 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89185 if (fd < 0) {
89186 error = fd;
89187@@ -127,11 +127,11 @@ int __init initrd_load(void)
89188 * mounted in the normal path.
89189 */
89190 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89191- sys_unlink("/initrd.image");
89192+ sys_unlink((const char __force_user *)"/initrd.image");
89193 handle_initrd();
89194 return 1;
89195 }
89196 }
89197- sys_unlink("/initrd.image");
89198+ sys_unlink((const char __force_user *)"/initrd.image");
89199 return 0;
89200 }
89201diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89202index 8cb6db5..d729f50 100644
89203--- a/init/do_mounts_md.c
89204+++ b/init/do_mounts_md.c
89205@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
89206 partitioned ? "_d" : "", minor,
89207 md_setup_args[ent].device_names);
89208
89209- fd = sys_open(name, 0, 0);
89210+ fd = sys_open((char __force_user *)name, 0, 0);
89211 if (fd < 0) {
89212 printk(KERN_ERR "md: open failed - cannot start "
89213 "array %s\n", name);
89214@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
89215 * array without it
89216 */
89217 sys_close(fd);
89218- fd = sys_open(name, 0, 0);
89219+ fd = sys_open((char __force_user *)name, 0, 0);
89220 sys_ioctl(fd, BLKRRPART, 0);
89221 }
89222 sys_close(fd);
89223@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
89224
89225 wait_for_device_probe();
89226
89227- fd = sys_open("/dev/md0", 0, 0);
89228+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
89229 if (fd >= 0) {
89230 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89231 sys_close(fd);
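
The __force_user casts blanketing init/do_mounts*.c exist because PaX UDEREF makes the kernel/user pointer split enforceable: __user is a sparse address-space annotation, syscall entry points expect user pointers, and early-init code handing them kernel strings needs an explicit, audited cast. A compilable model of the annotation machinery (assumption: these macros imitate include/linux/compiler.h and are no-ops unless the code is run through sparse):

#include <stdio.h>

#ifdef __CHECKER__                       /* defined when sparse runs */
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

static long fake_sys_unlink(const char __user *pathname)
{
    /* A real syscall would copy_from_user() before touching this. */
    printf("unlink(%s)\n", (const char __force *)pathname);
    return 0;
}

int main(void)
{
    const char *kernel_str = "/initrd.image";
    /* Without the cast, sparse reports an address-space mismatch. */
    return (int)fake_sys_unlink((const char __force_user *)kernel_str);
}
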
89232diff --git a/init/init_task.c b/init/init_task.c
89233index ba0a7f36..2bcf1d5 100644
89234--- a/init/init_task.c
89235+++ b/init/init_task.c
89236@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
89237 * Initial thread structure. Alignment of this is handled by a special
89238 * linker map entry.
89239 */
89240+#ifdef CONFIG_X86
89241+union thread_union init_thread_union __init_task_data;
89242+#else
89243 union thread_union init_thread_union __init_task_data =
89244 { INIT_THREAD_INFO(init_task) };
89245+#endif
89246diff --git a/init/initramfs.c b/init/initramfs.c
89247index ad1bd77..dca2c1b 100644
89248--- a/init/initramfs.c
89249+++ b/init/initramfs.c
89250@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
89251
89252 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
89253 while (count) {
89254- ssize_t rv = sys_write(fd, p, count);
89255+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
89256
89257 if (rv < 0) {
89258 if (rv == -EINTR || rv == -EAGAIN)
89259@@ -107,7 +107,7 @@ static void __init free_hash(void)
89260 }
89261 }
89262
89263-static long __init do_utime(char *filename, time_t mtime)
89264+static long __init do_utime(char __force_user *filename, time_t mtime)
89265 {
89266 struct timespec t[2];
89267
89268@@ -142,7 +142,7 @@ static void __init dir_utime(void)
89269 struct dir_entry *de, *tmp;
89270 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89271 list_del(&de->list);
89272- do_utime(de->name, de->mtime);
89273+ do_utime((char __force_user *)de->name, de->mtime);
89274 kfree(de->name);
89275 kfree(de);
89276 }
89277@@ -304,7 +304,7 @@ static int __init maybe_link(void)
89278 if (nlink >= 2) {
89279 char *old = find_link(major, minor, ino, mode, collected);
89280 if (old)
89281- return (sys_link(old, collected) < 0) ? -1 : 1;
89282+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89283 }
89284 return 0;
89285 }
89286@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
89287 {
89288 struct stat st;
89289
89290- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
89291+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
89292 if (S_ISDIR(st.st_mode))
89293- sys_rmdir(path);
89294+ sys_rmdir((char __force_user *)path);
89295 else
89296- sys_unlink(path);
89297+ sys_unlink((char __force_user *)path);
89298 }
89299 }
89300
89301@@ -338,7 +338,7 @@ static int __init do_name(void)
89302 int openflags = O_WRONLY|O_CREAT;
89303 if (ml != 1)
89304 openflags |= O_TRUNC;
89305- wfd = sys_open(collected, openflags, mode);
89306+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89307
89308 if (wfd >= 0) {
89309 sys_fchown(wfd, uid, gid);
89310@@ -350,17 +350,17 @@ static int __init do_name(void)
89311 }
89312 }
89313 } else if (S_ISDIR(mode)) {
89314- sys_mkdir(collected, mode);
89315- sys_chown(collected, uid, gid);
89316- sys_chmod(collected, mode);
89317+ sys_mkdir((char __force_user *)collected, mode);
89318+ sys_chown((char __force_user *)collected, uid, gid);
89319+ sys_chmod((char __force_user *)collected, mode);
89320 dir_add(collected, mtime);
89321 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89322 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89323 if (maybe_link() == 0) {
89324- sys_mknod(collected, mode, rdev);
89325- sys_chown(collected, uid, gid);
89326- sys_chmod(collected, mode);
89327- do_utime(collected, mtime);
89328+ sys_mknod((char __force_user *)collected, mode, rdev);
89329+ sys_chown((char __force_user *)collected, uid, gid);
89330+ sys_chmod((char __force_user *)collected, mode);
89331+ do_utime((char __force_user *)collected, mtime);
89332 }
89333 }
89334 return 0;
89335@@ -372,7 +372,7 @@ static int __init do_copy(void)
89336 if (xwrite(wfd, victim, body_len) != body_len)
89337 error("write error");
89338 sys_close(wfd);
89339- do_utime(vcollected, mtime);
89340+ do_utime((char __force_user *)vcollected, mtime);
89341 kfree(vcollected);
89342 eat(body_len);
89343 state = SkipIt;
89344@@ -390,9 +390,9 @@ static int __init do_symlink(void)
89345 {
89346 collected[N_ALIGN(name_len) + body_len] = '\0';
89347 clean_path(collected, 0);
89348- sys_symlink(collected + N_ALIGN(name_len), collected);
89349- sys_lchown(collected, uid, gid);
89350- do_utime(collected, mtime);
89351+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
89352+ sys_lchown((char __force_user *)collected, uid, gid);
89353+ do_utime((char __force_user *)collected, mtime);
89354 state = SkipIt;
89355 next_state = Reset;
89356 return 0;
89357diff --git a/init/main.c b/init/main.c
89358index 6f0f1c5f..a542824 100644
89359--- a/init/main.c
89360+++ b/init/main.c
89361@@ -96,6 +96,8 @@ extern void radix_tree_init(void);
89362 static inline void mark_rodata_ro(void) { }
89363 #endif
89364
89365+extern void grsecurity_init(void);
89366+
89367 /*
89368 * Debug helper: via this flag we know that we are in 'early bootup code'
89369 * where only the boot processor is running with IRQ disabled. This means
89370@@ -157,6 +159,85 @@ static int __init set_reset_devices(char *str)
89371
89372 __setup("reset_devices", set_reset_devices);
89373
89374+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
89375+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
89376+static int __init setup_grsec_proc_gid(char *str)
89377+{
89378+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
89379+ return 1;
89380+}
89381+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
89382+#endif
89383+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
89384+int grsec_enable_sysfs_restrict = 1;
89385+static int __init setup_grsec_sysfs_restrict(char *str)
89386+{
89387+ if (!simple_strtol(str, NULL, 0))
89388+ grsec_enable_sysfs_restrict = 0;
89389+ return 1;
89390+}
89391+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
89392+#endif
89393+
89394+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
89395+unsigned long pax_user_shadow_base __read_only;
89396+EXPORT_SYMBOL(pax_user_shadow_base);
89397+extern char pax_enter_kernel_user[];
89398+extern char pax_exit_kernel_user[];
89399+#endif
89400+
89401+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
89402+static int __init setup_pax_nouderef(char *str)
89403+{
89404+#ifdef CONFIG_X86_32
89405+ unsigned int cpu;
89406+ struct desc_struct *gdt;
89407+
89408+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
89409+ gdt = get_cpu_gdt_table(cpu);
89410+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
89411+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
89412+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
89413+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
89414+ }
89415+ loadsegment(ds, __KERNEL_DS);
89416+ loadsegment(es, __KERNEL_DS);
89417+ loadsegment(ss, __KERNEL_DS);
89418+#else
89419+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
89420+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
89421+ clone_pgd_mask = ~(pgdval_t)0UL;
89422+ pax_user_shadow_base = 0UL;
89423+ setup_clear_cpu_cap(X86_FEATURE_PCID);
89424+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
89425+#endif
89426+
89427+ return 0;
89428+}
89429+early_param("pax_nouderef", setup_pax_nouderef);
89430+
89431+#ifdef CONFIG_X86_64
89432+static int __init setup_pax_weakuderef(char *str)
89433+{
89434+ if (clone_pgd_mask != ~(pgdval_t)0UL)
89435+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
89436+ return 1;
89437+}
89438+__setup("pax_weakuderef", setup_pax_weakuderef);
89439+#endif
89440+#endif
89441+
89442+#ifdef CONFIG_PAX_SOFTMODE
89443+int pax_softmode;
89444+
89445+static int __init setup_pax_softmode(char *str)
89446+{
89447+ get_option(&str, &pax_softmode);
89448+ return 1;
89449+}
89450+__setup("pax_softmode=", setup_pax_softmode);
89451+#endif
89452+
89453 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
89454 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
89455 static const char *panic_later, *panic_param;
89456@@ -722,7 +803,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
89457 struct blacklist_entry *entry;
89458 char *fn_name;
89459
89460- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
89461+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
89462 if (!fn_name)
89463 return false;
89464
89465@@ -774,7 +855,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
89466 {
89467 int count = preempt_count();
89468 int ret;
89469- char msgbuf[64];
89470+ const char *msg1 = "", *msg2 = "";
89471
89472 if (initcall_blacklisted(fn))
89473 return -EPERM;
89474@@ -784,18 +865,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
89475 else
89476 ret = fn();
89477
89478- msgbuf[0] = 0;
89479-
89480 if (preempt_count() != count) {
89481- sprintf(msgbuf, "preemption imbalance ");
89482+ msg1 = " preemption imbalance";
89483 preempt_count_set(count);
89484 }
89485 if (irqs_disabled()) {
89486- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
89487+ msg2 = " disabled interrupts";
89488 local_irq_enable();
89489 }
89490- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
89491+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
89492
89493+ add_latent_entropy();
89494 return ret;
89495 }
89496
89497@@ -901,8 +981,8 @@ static int run_init_process(const char *init_filename)
89498 {
89499 argv_init[0] = init_filename;
89500 return do_execve(getname_kernel(init_filename),
89501- (const char __user *const __user *)argv_init,
89502- (const char __user *const __user *)envp_init);
89503+ (const char __user *const __force_user *)argv_init,
89504+ (const char __user *const __force_user *)envp_init);
89505 }
89506
89507 static int try_to_run_init_process(const char *init_filename)
89508@@ -919,6 +999,10 @@ static int try_to_run_init_process(const char *init_filename)
89509 return ret;
89510 }
89511
89512+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89513+extern int gr_init_ran;
89514+#endif
89515+
89516 static noinline void __init kernel_init_freeable(void);
89517
89518 static int __ref kernel_init(void *unused)
89519@@ -943,6 +1027,11 @@ static int __ref kernel_init(void *unused)
89520 ramdisk_execute_command, ret);
89521 }
89522
89523+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
89524+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
89525+ gr_init_ran = 1;
89526+#endif
89527+
89528 /*
89529 * We try each of these until one succeeds.
89530 *
89531@@ -998,7 +1087,7 @@ static noinline void __init kernel_init_freeable(void)
89532 do_basic_setup();
89533
89534 /* Open the /dev/console on the rootfs, this should never fail */
89535- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
89536+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
89537 pr_err("Warning: unable to open an initial console.\n");
89538
89539 (void) sys_dup(0);
89540@@ -1011,11 +1100,13 @@ static noinline void __init kernel_init_freeable(void)
89541 if (!ramdisk_execute_command)
89542 ramdisk_execute_command = "/init";
89543
89544- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
89545+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
89546 ramdisk_execute_command = NULL;
89547 prepare_namespace();
89548 }
89549
89550+ grsecurity_init();
89551+
89552 /*
89553 * Ok, we have completed the initial bootup, and
89554 * we're essentially up and running. Get rid of the
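
The grsec_proc_gid=, grsec_sysfs_restrict, pax_nouderef and pax_softmode= hooks added above all ride the kernel command line: __setup() handlers receive the text after the '=' and return 1 when the parameter is consumed, while early_param() handlers run earlier and return 0 on success. A rough userspace model of the grsec_proc_gid= handler (assumption: strtol stands in for simple_strtol and a plain long for kgid_t):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long grsec_proc_gid = 10;           /* build-time default */

static int setup_grsec_proc_gid(const char *str)
{
    grsec_proc_gid = strtol(str, NULL, 0);  /* simple_strtol() stand-in */
    return 1;                               /* parameter consumed */
}

int main(void)
{
    const char *cmdline = "quiet grsec_proc_gid=1001 ro";
    const char *p = strstr(cmdline, "grsec_proc_gid=");
    if (p)
        setup_grsec_proc_gid(p + strlen("grsec_proc_gid="));
    printf("proc gid: %ld\n", grsec_proc_gid);
    return 0;
}
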
89555diff --git a/ipc/compat.c b/ipc/compat.c
89556index 9b3c85f..5266b0f 100644
89557--- a/ipc/compat.c
89558+++ b/ipc/compat.c
89559@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
89560 COMPAT_SHMLBA);
89561 if (err < 0)
89562 return err;
89563- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
89564+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
89565 }
89566 case SHMDT:
89567 return sys_shmdt(compat_ptr(ptr));
89568@@ -747,7 +747,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
89569 }
89570
89571 COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
89572- unsigned, nsops,
89573+ compat_long_t, nsops,
89574 const struct compat_timespec __user *, timeout)
89575 {
89576 struct timespec __user *ts64;
89577diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
89578index 8ad93c2..efd80f8 100644
89579--- a/ipc/ipc_sysctl.c
89580+++ b/ipc/ipc_sysctl.c
89581@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
89582 static int proc_ipc_dointvec(struct ctl_table *table, int write,
89583 void __user *buffer, size_t *lenp, loff_t *ppos)
89584 {
89585- struct ctl_table ipc_table;
89586+ ctl_table_no_const ipc_table;
89587
89588 memcpy(&ipc_table, table, sizeof(ipc_table));
89589 ipc_table.data = get_ipc(table);
89590@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
89591 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
89592 void __user *buffer, size_t *lenp, loff_t *ppos)
89593 {
89594- struct ctl_table ipc_table;
89595+ ctl_table_no_const ipc_table;
89596
89597 memcpy(&ipc_table, table, sizeof(ipc_table));
89598 ipc_table.data = get_ipc(table);
89599@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
89600 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
89601 void __user *buffer, size_t *lenp, loff_t *ppos)
89602 {
89603- struct ctl_table ipc_table;
89604+ ctl_table_no_const ipc_table;
89605 memcpy(&ipc_table, table, sizeof(ipc_table));
89606 ipc_table.data = get_ipc(table);
89607
89608@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
89609 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
89610 void __user *buffer, size_t *lenp, loff_t *ppos)
89611 {
89612- struct ctl_table ipc_table;
89613+ ctl_table_no_const ipc_table;
89614 int dummy = 0;
89615
89616 memcpy(&ipc_table, table, sizeof(ipc_table));
89617diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
89618index 68d4e95..1477ded 100644
89619--- a/ipc/mq_sysctl.c
89620+++ b/ipc/mq_sysctl.c
89621@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
89622 static int proc_mq_dointvec(struct ctl_table *table, int write,
89623 void __user *buffer, size_t *lenp, loff_t *ppos)
89624 {
89625- struct ctl_table mq_table;
89626+ ctl_table_no_const mq_table;
89627 memcpy(&mq_table, table, sizeof(mq_table));
89628 mq_table.data = get_mq(table);
89629
89630@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
89631 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
89632 void __user *buffer, size_t *lenp, loff_t *ppos)
89633 {
89634- struct ctl_table mq_table;
89635+ ctl_table_no_const mq_table;
89636 memcpy(&mq_table, table, sizeof(mq_table));
89637 mq_table.data = get_mq(table);
89638
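
Every proc_*_dointvec handler in these sysctl files follows one pattern: the constify plugin turns struct ctl_table instances const, so a handler that must point .data at per-namespace storage first copies the entry into a stack local typed with the writable ctl_table_no_const. A compilable model of the pattern (assumption: a two-field struct stands in for the real ctl_table):

#include <stdio.h>
#include <string.h>

struct ctl_table { const char *procname; void *data; };
typedef struct ctl_table ctl_table_no_const;   /* the writable spelling */

static int shared_value = 42;
static const struct ctl_table ipc_entry = { "msgmax", NULL };

static int proc_ipc_dointvec(const struct ctl_table *table)
{
    ctl_table_no_const ipc_table;              /* local, mutable copy */
    memcpy(&ipc_table, table, sizeof(ipc_table));
    ipc_table.data = &shared_value;            /* legal only on the copy */
    printf("%s -> %d\n", ipc_table.procname, *(int *)ipc_table.data);
    return 0;
}

int main(void)
{
    return proc_ipc_dointvec(&ipc_entry);
}
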
89639diff --git a/ipc/mqueue.c b/ipc/mqueue.c
89640index 7635a1c..7432cb6 100644
89641--- a/ipc/mqueue.c
89642+++ b/ipc/mqueue.c
89643@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
89644 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
89645 info->attr.mq_msgsize);
89646
89647+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
89648 spin_lock(&mq_lock);
89649 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
89650 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
89651diff --git a/ipc/sem.c b/ipc/sem.c
89652index 9284211..bca5b1b 100644
89653--- a/ipc/sem.c
89654+++ b/ipc/sem.c
89655@@ -1780,7 +1780,7 @@ static int get_queue_result(struct sem_queue *q)
89656 }
89657
89658 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
89659- unsigned, nsops, const struct timespec __user *, timeout)
89660+ long, nsops, const struct timespec __user *, timeout)
89661 {
89662 int error = -EINVAL;
89663 struct sem_array *sma;
89664@@ -2015,7 +2015,7 @@ out_free:
89665 }
89666
89667 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
89668- unsigned, nsops)
89669+ long, nsops)
89670 {
89671 return sys_semtimedop(semid, tsops, nsops, NULL);
89672 }
89673diff --git a/ipc/shm.c b/ipc/shm.c
89674index 19633b4..d454904 100644
89675--- a/ipc/shm.c
89676+++ b/ipc/shm.c
89677@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
89678 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
89679 #endif
89680
89681+#ifdef CONFIG_GRKERNSEC
89682+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89683+ const u64 shm_createtime, const kuid_t cuid,
89684+ const int shmid);
89685+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89686+ const u64 shm_createtime);
89687+#endif
89688+
89689 void shm_init_ns(struct ipc_namespace *ns)
89690 {
89691 ns->shm_ctlmax = SHMMAX;
89692@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
89693 shp->shm_lprid = 0;
89694 shp->shm_atim = shp->shm_dtim = 0;
89695 shp->shm_ctim = get_seconds();
89696+#ifdef CONFIG_GRKERNSEC
89697+ shp->shm_createtime = ktime_get_ns();
89698+#endif
89699 shp->shm_segsz = size;
89700 shp->shm_nattch = 0;
89701 shp->shm_file = file;
89702@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89703 f_mode = FMODE_READ | FMODE_WRITE;
89704 }
89705 if (shmflg & SHM_EXEC) {
89706+
89707+#ifdef CONFIG_PAX_MPROTECT
89708+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
89709+ goto out;
89710+#endif
89711+
89712 prot |= PROT_EXEC;
89713 acc_mode |= S_IXUGO;
89714 }
89715@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89716 if (err)
89717 goto out_unlock;
89718
89719+#ifdef CONFIG_GRKERNSEC
89720+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
89721+ shp->shm_perm.cuid, shmid) ||
89722+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
89723+ err = -EACCES;
89724+ goto out_unlock;
89725+ }
89726+#endif
89727+
89728 ipc_lock_object(&shp->shm_perm);
89729
89730 /* check if shm_destroy() is tearing down shp */
89731@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
89732 path = shp->shm_file->f_path;
89733 path_get(&path);
89734 shp->shm_nattch++;
89735+#ifdef CONFIG_GRKERNSEC
89736+ shp->shm_lapid = current->pid;
89737+#endif
89738 size = i_size_read(path.dentry->d_inode);
89739 ipc_unlock_object(&shp->shm_perm);
89740 rcu_read_unlock();
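
Three separate hardenings land in ipc/shm.c. Segments now record a creation timestamp and last-attach PID, which gr_handle_shmat() and gr_chroot_shmat() use to refuse attaches to segments whose creator has since exited or that originate outside the caller's chroot. Independently, a task running under PaX MPROTECT is denied SHM_EXEC attaches outright, closing an easy route to fresh executable memory. A small userspace probe of the latter (the SHM_EXEC fallback value matches the kernel ABI; on a stock kernel the attach simply succeeds):

    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    #ifndef SHM_EXEC
    #define SHM_EXEC 0100000   /* execute access, from the kernel ABI */
    #endif

    int main(void)
    {
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (id < 0) { perror("shmget"); return 1; }
        void *p = shmat(id, NULL, SHM_EXEC);
        if (p == (void *)-1)
            printf("shmat(SHM_EXEC) failed: %s\n", strerror(errno));
        else
            printf("shmat(SHM_EXEC) succeeded at %p\n", p);
        shmctl(id, IPC_RMID, NULL);   /* clean up the segment */
        return 0;
    }
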
89741diff --git a/ipc/util.c b/ipc/util.c
89742index 106bed0..f851429 100644
89743--- a/ipc/util.c
89744+++ b/ipc/util.c
89745@@ -71,6 +71,8 @@ struct ipc_proc_iface {
89746 int (*show)(struct seq_file *, void *);
89747 };
89748
89749+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
89750+
89751 /**
89752 * ipc_init - initialise ipc subsystem
89753 *
89754@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
89755 granted_mode >>= 6;
89756 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
89757 granted_mode >>= 3;
89758+
89759+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
89760+ return -1;
89761+
89762 /* is there some bit set in requested_mode but not in granted_mode? */
89763 if ((requested_mode & ~granted_mode & 0007) &&
89764 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
89765diff --git a/kernel/audit.c b/kernel/audit.c
89766index 72ab759..757deba 100644
89767--- a/kernel/audit.c
89768+++ b/kernel/audit.c
89769@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
89770 3) suppressed due to audit_rate_limit
89771 4) suppressed due to audit_backlog_limit
89772 */
89773-static atomic_t audit_lost = ATOMIC_INIT(0);
89774+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
89775
89776 /* The netlink socket. */
89777 static struct sock *audit_sock;
89778@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
89779 unsigned long now;
89780 int print;
89781
89782- atomic_inc(&audit_lost);
89783+ atomic_inc_unchecked(&audit_lost);
89784
89785 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
89786
89787@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
89788 if (print) {
89789 if (printk_ratelimit())
89790 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
89791- atomic_read(&audit_lost),
89792+ atomic_read_unchecked(&audit_lost),
89793 audit_rate_limit,
89794 audit_backlog_limit);
89795 audit_panic(message);
89796@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
89797 s.pid = audit_pid;
89798 s.rate_limit = audit_rate_limit;
89799 s.backlog_limit = audit_backlog_limit;
89800- s.lost = atomic_read(&audit_lost);
89801+ s.lost = atomic_read_unchecked(&audit_lost);
89802 s.backlog = skb_queue_len(&audit_skb_queue);
89803 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
89804 s.backlog_wait_time = audit_backlog_wait_time;
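
The atomic_unchecked_t conversions here recur throughout the patch (kernel/auditsc.c, kernel/debug/debug_core.c and the kernel/irq files below follow the same template). Under PaX REFCOUNT, ordinary atomic_t operations trap on overflow to defeat reference-count wraparound exploits; counters that are pure statistics and may legitimately wrap, such as audit_lost, are moved to the _unchecked variants so they cannot trigger false positives.
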
89805diff --git a/kernel/auditsc.c b/kernel/auditsc.c
89806index dc4ae70..2a2bddc 100644
89807--- a/kernel/auditsc.c
89808+++ b/kernel/auditsc.c
89809@@ -1955,7 +1955,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
89810 }
89811
89812 /* global counter which is incremented every time something logs in */
89813-static atomic_t session_id = ATOMIC_INIT(0);
89814+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
89815
89816 static int audit_set_loginuid_perm(kuid_t loginuid)
89817 {
89818@@ -2022,7 +2022,7 @@ int audit_set_loginuid(kuid_t loginuid)
89819
89820 /* are we setting or clearing? */
89821 if (uid_valid(loginuid))
89822- sessionid = (unsigned int)atomic_inc_return(&session_id);
89823+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
89824
89825 task->sessionid = sessionid;
89826 task->loginuid = loginuid;
89827diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
89828index 0c5796e..a9414e2 100644
89829--- a/kernel/bpf/core.c
89830+++ b/kernel/bpf/core.c
89831@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
89832 * random section of illegal instructions.
89833 */
89834 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
89835- hdr = module_alloc(size);
89836+ hdr = module_alloc_exec(size);
89837 if (hdr == NULL)
89838 return NULL;
89839
89840 /* Fill space with illegal/arch-dep instructions. */
89841 bpf_fill_ill_insns(hdr, size);
89842
89843+ pax_open_kernel();
89844 hdr->pages = size / PAGE_SIZE;
89845+ pax_close_kernel();
89846+
89847 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
89848 PAGE_SIZE - sizeof(*hdr));
89849 start = (prandom_u32() % hole) & ~(alignment - 1);
89850@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
89851
89852 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
89853 {
89854- module_memfree(hdr);
89855+ module_memfree_exec(hdr);
89856 }
89857 #endif /* CONFIG_BPF_JIT */
89858
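
For the BPF JIT, module_alloc_exec()/module_memfree_exec() draw from the dedicated executable mapping grsecurity also uses for module text, and the hdr->pages store is bracketed by pax_open_kernel()/pax_close_kernel(), apparently because under KERNEXEC the freshly allocated JIT header is only writable inside such a window.
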
89859diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
89860index 536edc2..d28c85d 100644
89861--- a/kernel/bpf/syscall.c
89862+++ b/kernel/bpf/syscall.c
89863@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
89864 int err;
89865
89866 /* the syscall is limited to root temporarily. This restriction will be
89867- * lifted when security audit is clean. Note that eBPF+tracing must have
89868- * this restriction, since it may pass kernel data to user space
89869+ * lifted by upstream when a half-assed security audit is clean. Note
89870+ * that eBPF+tracing must have this restriction, since it may pass
89871+ * kernel data to user space
89872 */
89873 if (!capable(CAP_SYS_ADMIN))
89874 return -EPERM;
89875+#ifdef CONFIG_GRKERNSEC
89876+ return -EPERM;
89877+#endif
89878
89879 if (!access_ok(VERIFY_READ, uattr, 1))
89880 return -EFAULT;
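
Under CONFIG_GRKERNSEC the bpf(2) syscall is disabled outright: the unconditional -EPERM sits after the capable() check, so even root is refused once the capability test has run. The reworded comment above it is the patch author's editorial on the upstream plan to eventually expose the syscall to unprivileged users.
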
89881diff --git a/kernel/capability.c b/kernel/capability.c
89882index 989f5bf..d317ca0 100644
89883--- a/kernel/capability.c
89884+++ b/kernel/capability.c
89885@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
89886 * before modification is attempted and the application
89887 * fails.
89888 */
89889+ if (tocopy > ARRAY_SIZE(kdata))
89890+ return -EFAULT;
89891+
89892 if (copy_to_user(dataptr, kdata, tocopy
89893 * sizeof(struct __user_cap_data_struct))) {
89894 return -EFAULT;
89895@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
89896 int ret;
89897
89898 rcu_read_lock();
89899- ret = security_capable(__task_cred(t), ns, cap);
89900+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
89901+ gr_task_is_capable(t, __task_cred(t), cap);
89902 rcu_read_unlock();
89903
89904- return (ret == 0);
89905+ return ret;
89906 }
89907
89908 /**
89909@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
89910 int ret;
89911
89912 rcu_read_lock();
89913- ret = security_capable_noaudit(__task_cred(t), ns, cap);
89914+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
89915 rcu_read_unlock();
89916
89917- return (ret == 0);
89918+ return ret;
89919 }
89920
89921 /**
89922@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
89923 BUG();
89924 }
89925
89926- if (security_capable(current_cred(), ns, cap) == 0) {
89927+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
89928 current->flags |= PF_SUPERPRIV;
89929 return true;
89930 }
89931@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
89932 }
89933 EXPORT_SYMBOL(ns_capable);
89934
89935+bool ns_capable_nolog(struct user_namespace *ns, int cap)
89936+{
89937+ if (unlikely(!cap_valid(cap))) {
89938+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
89939+ BUG();
89940+ }
89941+
89942+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
89943+ current->flags |= PF_SUPERPRIV;
89944+ return true;
89945+ }
89946+ return false;
89947+}
89948+EXPORT_SYMBOL(ns_capable_nolog);
89949+
89950 /**
89951 * file_ns_capable - Determine if the file's opener had a capability in effect
89952 * @file: The file we want to check
89953@@ -427,6 +446,12 @@ bool capable(int cap)
89954 }
89955 EXPORT_SYMBOL(capable);
89956
89957+bool capable_nolog(int cap)
89958+{
89959+ return ns_capable_nolog(&init_user_ns, cap);
89960+}
89961+EXPORT_SYMBOL(capable_nolog);
89962+
89963 /**
89964 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
89965 * @inode: The inode in question
89966@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
89967 kgid_has_mapping(ns, inode->i_gid);
89968 }
89969 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
89970+
89971+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
89972+{
89973+ struct user_namespace *ns = current_user_ns();
89974+
89975+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
89976+ kgid_has_mapping(ns, inode->i_gid);
89977+}
89978+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
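
Two distinct things happen in kernel/capability.c. capget() gains a defensive bounds check so tocopy can never index past the two-element kdata array even if a future header version inflates it. Separately, every capability query is also routed through the gr_*_capable() hooks, letting RBAC policy veto a capability the DAC layer would grant, and _nolog variants (ns_capable_nolog, capable_nolog, capable_wrt_inode_uidgid_nolog) are added for internal probes that should not flood the grsecurity audit log.
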
89979diff --git a/kernel/cgroup.c b/kernel/cgroup.c
89980index 29a7b2c..a64e30a 100644
89981--- a/kernel/cgroup.c
89982+++ b/kernel/cgroup.c
89983@@ -5347,6 +5347,9 @@ static void cgroup_release_agent(struct work_struct *work)
89984 if (!pathbuf || !agentbuf)
89985 goto out;
89986
89987+ if (agentbuf[0] == '\0')
89988+ goto out;
89989+
89990 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
89991 if (!path)
89992 goto out;
89993@@ -5532,7 +5535,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
89994 struct task_struct *task;
89995 int count = 0;
89996
89997- seq_printf(seq, "css_set %p\n", cset);
89998+ seq_printf(seq, "css_set %pK\n", cset);
89999
90000 list_for_each_entry(task, &cset->tasks, cg_list) {
90001 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
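
The cgroup changes are small: an empty release_agent path no longer spawns a usermode helper with an empty argv[0], and the css_set debug output moves from %p to %pK so the pointer is censored for unprivileged readers according to kptr_restrict (unconditionally so under GRKERNSEC_HIDESYM).
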
90002diff --git a/kernel/compat.c b/kernel/compat.c
90003index 24f0061..ea80802 100644
90004--- a/kernel/compat.c
90005+++ b/kernel/compat.c
90006@@ -13,6 +13,7 @@
90007
90008 #include <linux/linkage.h>
90009 #include <linux/compat.h>
90010+#include <linux/module.h>
90011 #include <linux/errno.h>
90012 #include <linux/time.h>
90013 #include <linux/signal.h>
90014@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90015 mm_segment_t oldfs;
90016 long ret;
90017
90018- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90019+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90020 oldfs = get_fs();
90021 set_fs(KERNEL_DS);
90022 ret = hrtimer_nanosleep_restart(restart);
90023@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
90024 oldfs = get_fs();
90025 set_fs(KERNEL_DS);
90026 ret = hrtimer_nanosleep(&tu,
90027- rmtp ? (struct timespec __user *)&rmt : NULL,
90028+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
90029 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90030 set_fs(oldfs);
90031
90032@@ -378,7 +379,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
90033 mm_segment_t old_fs = get_fs();
90034
90035 set_fs(KERNEL_DS);
90036- ret = sys_sigpending((old_sigset_t __user *) &s);
90037+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
90038 set_fs(old_fs);
90039 if (ret == 0)
90040 ret = put_user(s, set);
90041@@ -468,7 +469,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
90042 mm_segment_t old_fs = get_fs();
90043
90044 set_fs(KERNEL_DS);
90045- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
90046+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90047 set_fs(old_fs);
90048
90049 if (!ret) {
90050@@ -550,8 +551,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
90051 set_fs (KERNEL_DS);
90052 ret = sys_wait4(pid,
90053 (stat_addr ?
90054- (unsigned int __user *) &status : NULL),
90055- options, (struct rusage __user *) &r);
90056+ (unsigned int __force_user *) &status : NULL),
90057+ options, (struct rusage __force_user *) &r);
90058 set_fs (old_fs);
90059
90060 if (ret > 0) {
90061@@ -577,8 +578,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
90062 memset(&info, 0, sizeof(info));
90063
90064 set_fs(KERNEL_DS);
90065- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90066- uru ? (struct rusage __user *)&ru : NULL);
90067+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90068+ uru ? (struct rusage __force_user *)&ru : NULL);
90069 set_fs(old_fs);
90070
90071 if ((ret < 0) || (info.si_signo == 0))
90072@@ -712,8 +713,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
90073 oldfs = get_fs();
90074 set_fs(KERNEL_DS);
90075 err = sys_timer_settime(timer_id, flags,
90076- (struct itimerspec __user *) &newts,
90077- (struct itimerspec __user *) &oldts);
90078+ (struct itimerspec __force_user *) &newts,
90079+ (struct itimerspec __force_user *) &oldts);
90080 set_fs(oldfs);
90081 if (!err && old && put_compat_itimerspec(old, &oldts))
90082 return -EFAULT;
90083@@ -730,7 +731,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
90084 oldfs = get_fs();
90085 set_fs(KERNEL_DS);
90086 err = sys_timer_gettime(timer_id,
90087- (struct itimerspec __user *) &ts);
90088+ (struct itimerspec __force_user *) &ts);
90089 set_fs(oldfs);
90090 if (!err && put_compat_itimerspec(setting, &ts))
90091 return -EFAULT;
90092@@ -749,7 +750,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
90093 oldfs = get_fs();
90094 set_fs(KERNEL_DS);
90095 err = sys_clock_settime(which_clock,
90096- (struct timespec __user *) &ts);
90097+ (struct timespec __force_user *) &ts);
90098 set_fs(oldfs);
90099 return err;
90100 }
90101@@ -764,7 +765,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
90102 oldfs = get_fs();
90103 set_fs(KERNEL_DS);
90104 err = sys_clock_gettime(which_clock,
90105- (struct timespec __user *) &ts);
90106+ (struct timespec __force_user *) &ts);
90107 set_fs(oldfs);
90108 if (!err && compat_put_timespec(&ts, tp))
90109 return -EFAULT;
90110@@ -784,7 +785,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
90111
90112 oldfs = get_fs();
90113 set_fs(KERNEL_DS);
90114- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
90115+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
90116 set_fs(oldfs);
90117
90118 err = compat_put_timex(utp, &txc);
90119@@ -804,7 +805,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
90120 oldfs = get_fs();
90121 set_fs(KERNEL_DS);
90122 err = sys_clock_getres(which_clock,
90123- (struct timespec __user *) &ts);
90124+ (struct timespec __force_user *) &ts);
90125 set_fs(oldfs);
90126 if (!err && tp && compat_put_timespec(&ts, tp))
90127 return -EFAULT;
90128@@ -818,7 +819,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90129 struct timespec tu;
90130 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90131
90132- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90133+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90134 oldfs = get_fs();
90135 set_fs(KERNEL_DS);
90136 err = clock_nanosleep_restart(restart);
90137@@ -850,8 +851,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
90138 oldfs = get_fs();
90139 set_fs(KERNEL_DS);
90140 err = sys_clock_nanosleep(which_clock, flags,
90141- (struct timespec __user *) &in,
90142- (struct timespec __user *) &out);
90143+ (struct timespec __force_user *) &in,
90144+ (struct timespec __force_user *) &out);
90145 set_fs(oldfs);
90146
90147 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90148@@ -1145,7 +1146,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
90149 mm_segment_t old_fs = get_fs();
90150
90151 set_fs(KERNEL_DS);
90152- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
90153+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
90154 set_fs(old_fs);
90155 if (compat_put_timespec(&t, interval))
90156 return -EFAULT;
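
All of the kernel/compat.c churn is annotation, not behaviour. These wrappers temporarily switch to KERNEL_DS and hand pointers to kernel stack variables to syscalls expecting __user pointers; __force_user (shorthand for __force __user) tells sparse and PaX's checking that the address-space cast is intentional at exactly these call sites, so genuine __user violations elsewhere still stand out.
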
90157diff --git a/kernel/configs.c b/kernel/configs.c
90158index c18b1f1..b9a0132 100644
90159--- a/kernel/configs.c
90160+++ b/kernel/configs.c
90161@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
90162 struct proc_dir_entry *entry;
90163
90164 /* create the current config file */
90165+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90166+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90167+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90168+ &ikconfig_file_ops);
90169+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90170+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90171+ &ikconfig_file_ops);
90172+#endif
90173+#else
90174 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90175 &ikconfig_file_ops);
90176+#endif
90177+
90178 if (!entry)
90179 return -ENOMEM;
90180
90181diff --git a/kernel/cred.c b/kernel/cred.c
90182index e0573a4..26c0fd3 100644
90183--- a/kernel/cred.c
90184+++ b/kernel/cred.c
90185@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
90186 validate_creds(cred);
90187 alter_cred_subscribers(cred, -1);
90188 put_cred(cred);
90189+
90190+#ifdef CONFIG_GRKERNSEC_SETXID
90191+ cred = (struct cred *) tsk->delayed_cred;
90192+ if (cred != NULL) {
90193+ tsk->delayed_cred = NULL;
90194+ validate_creds(cred);
90195+ alter_cred_subscribers(cred, -1);
90196+ put_cred(cred);
90197+ }
90198+#endif
90199 }
90200
90201 /**
90202@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
90203 * Always returns 0 thus allowing this function to be tail-called at the end
90204 * of, say, sys_setgid().
90205 */
90206-int commit_creds(struct cred *new)
90207+static int __commit_creds(struct cred *new)
90208 {
90209 struct task_struct *task = current;
90210 const struct cred *old = task->real_cred;
90211@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
90212
90213 get_cred(new); /* we will require a ref for the subj creds too */
90214
90215+ gr_set_role_label(task, new->uid, new->gid);
90216+
90217 /* dumpability changes */
90218 if (!uid_eq(old->euid, new->euid) ||
90219 !gid_eq(old->egid, new->egid) ||
90220@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
90221 put_cred(old);
90222 return 0;
90223 }
90224+#ifdef CONFIG_GRKERNSEC_SETXID
90225+extern int set_user(struct cred *new);
90226+
90227+void gr_delayed_cred_worker(void)
90228+{
90229+ const struct cred *new = current->delayed_cred;
90230+ struct cred *ncred;
90231+
90232+ current->delayed_cred = NULL;
90233+
90234+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
90235+ // from doing get_cred on it when queueing this
90236+ put_cred(new);
90237+ return;
90238+ } else if (new == NULL)
90239+ return;
90240+
90241+ ncred = prepare_creds();
90242+ if (!ncred)
90243+ goto die;
90244+ // uids
90245+ ncred->uid = new->uid;
90246+ ncred->euid = new->euid;
90247+ ncred->suid = new->suid;
90248+ ncred->fsuid = new->fsuid;
90249+ // gids
90250+ ncred->gid = new->gid;
90251+ ncred->egid = new->egid;
90252+ ncred->sgid = new->sgid;
90253+ ncred->fsgid = new->fsgid;
90254+ // groups
90255+ set_groups(ncred, new->group_info);
90256+ // caps
90257+ ncred->securebits = new->securebits;
90258+ ncred->cap_inheritable = new->cap_inheritable;
90259+ ncred->cap_permitted = new->cap_permitted;
90260+ ncred->cap_effective = new->cap_effective;
90261+ ncred->cap_bset = new->cap_bset;
90262+
90263+ if (set_user(ncred)) {
90264+ abort_creds(ncred);
90265+ goto die;
90266+ }
90267+
90268+ // from doing get_cred on it when queueing this
90269+ put_cred(new);
90270+
90271+ __commit_creds(ncred);
90272+ return;
90273+die:
90274+ // from doing get_cred on it when queueing this
90275+ put_cred(new);
90276+ do_group_exit(SIGKILL);
90277+}
90278+#endif
90279+
90280+int commit_creds(struct cred *new)
90281+{
90282+#ifdef CONFIG_GRKERNSEC_SETXID
90283+ int ret;
90284+ int schedule_it = 0;
90285+ struct task_struct *t;
90286+ unsigned oldsecurebits = current_cred()->securebits;
90287+
90288+ /* we won't get called with tasklist_lock held for writing
90289+ and interrupts disabled as the cred struct in that case is
90290+ init_cred
90291+ */
90292+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90293+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
90294+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
90295+ schedule_it = 1;
90296+ }
90297+ ret = __commit_creds(new);
90298+ if (schedule_it) {
90299+ rcu_read_lock();
90300+ read_lock(&tasklist_lock);
90301+ for (t = next_thread(current); t != current;
90302+ t = next_thread(t)) {
90303+ /* we'll check if the thread has uid 0 in
90304+ * the delayed worker routine
90305+ */
90306+ if (task_securebits(t) == oldsecurebits &&
90307+ t->delayed_cred == NULL) {
90308+ t->delayed_cred = get_cred(new);
90309+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
90310+ set_tsk_need_resched(t);
90311+ }
90312+ }
90313+ read_unlock(&tasklist_lock);
90314+ rcu_read_unlock();
90315+ }
90316+
90317+ return ret;
90318+#else
90319+ return __commit_creds(new);
90320+#endif
90321+}
90322+
90323 EXPORT_SYMBOL(commit_creds);
90324
90325 /**
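
The commit_creds() split is the heart of GRKERNSEC_SETXID. Upstream, a multithreaded setuid() depends on glibc signalling every thread to drop privileges itself; a thread that blocks or races can linger as root. Here the real work moves into __commit_creds(), and when a root process transitions to a non-root uid each sibling thread is tagged with TIF_GRSEC_SETXID and handed a reference to the new creds; gr_delayed_cred_worker() later applies them from a safe context, or kills the whole group with do_group_exit(SIGKILL) if that fails, so no thread can keep the old root credentials. exit_creds() correspondingly learns to drop a still-queued delayed_cred reference.
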
90326diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
90327index 0874e2e..5b32cc9 100644
90328--- a/kernel/debug/debug_core.c
90329+++ b/kernel/debug/debug_core.c
90330@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
90331 */
90332 static atomic_t masters_in_kgdb;
90333 static atomic_t slaves_in_kgdb;
90334-static atomic_t kgdb_break_tasklet_var;
90335+static atomic_unchecked_t kgdb_break_tasklet_var;
90336 atomic_t kgdb_setting_breakpoint;
90337
90338 struct task_struct *kgdb_usethread;
90339@@ -137,7 +137,7 @@ int kgdb_single_step;
90340 static pid_t kgdb_sstep_pid;
90341
90342 /* to keep track of the CPU which is doing the single stepping*/
90343-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90344+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90345
90346 /*
90347 * If you are debugging a problem where roundup (the collection of
90348@@ -552,7 +552,7 @@ return_normal:
90349 * kernel will only try for the value of sstep_tries before
90350 * giving up and continuing on.
90351 */
90352- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
90353+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
90354 (kgdb_info[cpu].task &&
90355 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
90356 atomic_set(&kgdb_active, -1);
90357@@ -654,8 +654,8 @@ cpu_master_loop:
90358 }
90359
90360 kgdb_restore:
90361- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
90362- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
90363+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
90364+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
90365 if (kgdb_info[sstep_cpu].task)
90366 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
90367 else
90368@@ -949,18 +949,18 @@ static void kgdb_unregister_callbacks(void)
90369 static void kgdb_tasklet_bpt(unsigned long ing)
90370 {
90371 kgdb_breakpoint();
90372- atomic_set(&kgdb_break_tasklet_var, 0);
90373+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
90374 }
90375
90376 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
90377
90378 void kgdb_schedule_breakpoint(void)
90379 {
90380- if (atomic_read(&kgdb_break_tasklet_var) ||
90381+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
90382 atomic_read(&kgdb_active) != -1 ||
90383 atomic_read(&kgdb_setting_breakpoint))
90384 return;
90385- atomic_inc(&kgdb_break_tasklet_var);
90386+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
90387 tasklet_schedule(&kgdb_tasklet_breakpoint);
90388 }
90389 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
90390diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
90391index 4121345..861e178 100644
90392--- a/kernel/debug/kdb/kdb_main.c
90393+++ b/kernel/debug/kdb/kdb_main.c
90394@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
90395 continue;
90396
90397 kdb_printf("%-20s%8u 0x%p ", mod->name,
90398- mod->core_size, (void *)mod);
90399+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
90400 #ifdef CONFIG_MODULE_UNLOAD
90401 kdb_printf("%4d ", module_refcount(mod));
90402 #endif
90403@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
90404 kdb_printf(" (Loading)");
90405 else
90406 kdb_printf(" (Live)");
90407- kdb_printf(" 0x%p", mod->module_core);
90408+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
90409
90410 #ifdef CONFIG_MODULE_UNLOAD
90411 {
90412diff --git a/kernel/events/core.c b/kernel/events/core.c
90413index 2fabc06..79cceec 100644
90414--- a/kernel/events/core.c
90415+++ b/kernel/events/core.c
90416@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
90417 * 0 - disallow raw tracepoint access for unpriv
90418 * 1 - disallow cpu events for unpriv
90419 * 2 - disallow kernel profiling for unpriv
90420+ * 3 - disallow all unpriv perf event use
90421 */
90422-int sysctl_perf_event_paranoid __read_mostly = 1;
90423+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90424+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
90425+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
90426+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
90427+#else
90428+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
90429+#endif
90430
90431 /* Minimum for 512 kiB + 1 user control page */
90432 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
90433@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
90434
90435 tmp *= sysctl_perf_cpu_time_max_percent;
90436 do_div(tmp, 100);
90437- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
90438+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
90439 }
90440
90441 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
90442@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
90443 }
90444 }
90445
90446-static atomic64_t perf_event_id;
90447+static atomic64_unchecked_t perf_event_id;
90448
90449 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
90450 enum event_type_t event_type);
90451@@ -3220,7 +3227,7 @@ static void __perf_event_read(void *info)
90452
90453 static inline u64 perf_event_count(struct perf_event *event)
90454 {
90455- return local64_read(&event->count) + atomic64_read(&event->child_count);
90456+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
90457 }
90458
90459 static u64 perf_event_read(struct perf_event *event)
90460@@ -3656,9 +3663,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
90461 mutex_lock(&event->child_mutex);
90462 total += perf_event_read(event);
90463 *enabled += event->total_time_enabled +
90464- atomic64_read(&event->child_total_time_enabled);
90465+ atomic64_read_unchecked(&event->child_total_time_enabled);
90466 *running += event->total_time_running +
90467- atomic64_read(&event->child_total_time_running);
90468+ atomic64_read_unchecked(&event->child_total_time_running);
90469
90470 list_for_each_entry(child, &event->child_list, child_list) {
90471 total += perf_event_read(child);
90472@@ -4147,10 +4154,10 @@ void perf_event_update_userpage(struct perf_event *event)
90473 userpg->offset -= local64_read(&event->hw.prev_count);
90474
90475 userpg->time_enabled = enabled +
90476- atomic64_read(&event->child_total_time_enabled);
90477+ atomic64_read_unchecked(&event->child_total_time_enabled);
90478
90479 userpg->time_running = running +
90480- atomic64_read(&event->child_total_time_running);
90481+ atomic64_read_unchecked(&event->child_total_time_running);
90482
90483 arch_perf_update_userpage(event, userpg, now);
90484
90485@@ -4740,7 +4747,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
90486
90487 /* Data. */
90488 sp = perf_user_stack_pointer(regs);
90489- rem = __output_copy_user(handle, (void *) sp, dump_size);
90490+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
90491 dyn_size = dump_size - rem;
90492
90493 perf_output_skip(handle, rem);
90494@@ -4831,11 +4838,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
90495 values[n++] = perf_event_count(event);
90496 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
90497 values[n++] = enabled +
90498- atomic64_read(&event->child_total_time_enabled);
90499+ atomic64_read_unchecked(&event->child_total_time_enabled);
90500 }
90501 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
90502 values[n++] = running +
90503- atomic64_read(&event->child_total_time_running);
90504+ atomic64_read_unchecked(&event->child_total_time_running);
90505 }
90506 if (read_format & PERF_FORMAT_ID)
90507 values[n++] = primary_event_id(event);
90508@@ -7180,7 +7187,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
90509 event->parent = parent_event;
90510
90511 event->ns = get_pid_ns(task_active_pid_ns(current));
90512- event->id = atomic64_inc_return(&perf_event_id);
90513+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
90514
90515 event->state = PERF_EVENT_STATE_INACTIVE;
90516
90517@@ -7470,6 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
90518 if (flags & ~PERF_FLAG_ALL)
90519 return -EINVAL;
90520
90521+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90522+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
90523+ return -EACCES;
90524+#endif
90525+
90526 err = perf_copy_attr(attr_uptr, &attr);
90527 if (err)
90528 return err;
90529@@ -7892,10 +7904,10 @@ static void sync_child_event(struct perf_event *child_event,
90530 /*
90531 * Add back the child's count to the parent's count:
90532 */
90533- atomic64_add(child_val, &parent_event->child_count);
90534- atomic64_add(child_event->total_time_enabled,
90535+ atomic64_add_unchecked(child_val, &parent_event->child_count);
90536+ atomic64_add_unchecked(child_event->total_time_enabled,
90537 &parent_event->child_total_time_enabled);
90538- atomic64_add(child_event->total_time_running,
90539+ atomic64_add_unchecked(child_event->total_time_running,
90540 &parent_event->child_total_time_running);
90541
90542 /*
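
perf_event_paranoid grows a level 3, disallowing all unprivileged perf use, and GRKERNSEC_PERF_HARDEN makes it the default (the sysctl keeps its public name even though the variable is renamed to sysctl_perf_event_legitimately_concerned); this is the setting Android and Debian kernels later adopted. perf_event_open() enforces it early with -EACCES for callers lacking CAP_SYS_ADMIN, the child time counters become atomic64_unchecked_t for the same REFCOUNT reason as in kernel/audit.c, and kernel/events/internal.h just below threads a user address-space parameter through DEFINE_OUTPUT_COPY so that __output_copy_user is properly __user-annotated.
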
90543diff --git a/kernel/events/internal.h b/kernel/events/internal.h
90544index 569b2187..19940d9 100644
90545--- a/kernel/events/internal.h
90546+++ b/kernel/events/internal.h
90547@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
90548 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
90549 }
90550
90551-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
90552+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
90553 static inline unsigned long \
90554 func_name(struct perf_output_handle *handle, \
90555- const void *buf, unsigned long len) \
90556+ const void user *buf, unsigned long len) \
90557 { \
90558 unsigned long size, written; \
90559 \
90560@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
90561 return 0;
90562 }
90563
90564-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
90565+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
90566
90567 static inline unsigned long
90568 memcpy_skip(void *dst, const void *src, unsigned long n)
90569@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
90570 return 0;
90571 }
90572
90573-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
90574+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
90575
90576 #ifndef arch_perf_out_copy_user
90577 #define arch_perf_out_copy_user arch_perf_out_copy_user
90578@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
90579 }
90580 #endif
90581
90582-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
90583+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
90584
90585 /* Callchain handling */
90586 extern struct perf_callchain_entry *
90587diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
90588index cb346f2..e4dc317 100644
90589--- a/kernel/events/uprobes.c
90590+++ b/kernel/events/uprobes.c
90591@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
90592 {
90593 struct page *page;
90594 uprobe_opcode_t opcode;
90595- int result;
90596+ long result;
90597
90598 pagefault_disable();
90599 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
90600diff --git a/kernel/exit.c b/kernel/exit.c
90601index feff10b..f623dd5 100644
90602--- a/kernel/exit.c
90603+++ b/kernel/exit.c
90604@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
90605 struct task_struct *leader;
90606 int zap_leader;
90607 repeat:
90608+#ifdef CONFIG_NET
90609+ gr_del_task_from_ip_table(p);
90610+#endif
90611+
90612 /* don't need to get the RCU readlock here - the process is dead and
90613 * can't be modifying its own credentials. But shut RCU-lockdep up */
90614 rcu_read_lock();
90615@@ -656,6 +660,8 @@ void do_exit(long code)
90616 int group_dead;
90617 TASKS_RCU(int tasks_rcu_i);
90618
90619+ set_fs(USER_DS);
90620+
90621 profile_task_exit(tsk);
90622
90623 WARN_ON(blk_needs_flush_plug(tsk));
90624@@ -672,7 +678,6 @@ void do_exit(long code)
90625 * mm_release()->clear_child_tid() from writing to a user-controlled
90626 * kernel address.
90627 */
90628- set_fs(USER_DS);
90629
90630 ptrace_event(PTRACE_EVENT_EXIT, code);
90631
90632@@ -730,6 +735,9 @@ void do_exit(long code)
90633 tsk->exit_code = code;
90634 taskstats_exit(tsk, group_dead);
90635
90636+ gr_acl_handle_psacct(tsk, code);
90637+ gr_acl_handle_exit();
90638+
90639 exit_mm(tsk);
90640
90641 if (group_dead)
90642@@ -849,7 +857,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
90643 * Take down every thread in the group. This is called by fatal signals
90644 * as well as by sys_exit_group (below).
90645 */
90646-void
90647+__noreturn void
90648 do_group_exit(int exit_code)
90649 {
90650 struct signal_struct *sig = current->signal;
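
In do_exit(), set_fs(USER_DS) moves to the very top of the exit path instead of sitting just before the ptrace event, shrinking the window in which an exiting task could still run with KERNEL_DS and have a user-supplied address honoured as a kernel one. The gr_* calls unhook the task from grsecurity's per-IP tracking and process accounting, and do_group_exit() is annotated __noreturn, which documents reality and lets the compiler check callers such as the SETXID failure path above.
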
90651diff --git a/kernel/fork.c b/kernel/fork.c
90652index cf65139..704476e 100644
90653--- a/kernel/fork.c
90654+++ b/kernel/fork.c
90655@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
90656 void thread_info_cache_init(void)
90657 {
90658 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
90659- THREAD_SIZE, 0, NULL);
90660+ THREAD_SIZE, SLAB_USERCOPY, NULL);
90661 BUG_ON(thread_info_cache == NULL);
90662 }
90663 # endif
90664 #endif
90665
90666+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90667+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
90668+ int node, void **lowmem_stack)
90669+{
90670+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
90671+ void *ret = NULL;
90672+ unsigned int i;
90673+
90674+ *lowmem_stack = alloc_thread_info_node(tsk, node);
90675+ if (*lowmem_stack == NULL)
90676+ goto out;
90677+
90678+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
90679+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
90680+
90681+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
90682+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
90683+ if (ret == NULL) {
90684+ free_thread_info(*lowmem_stack);
90685+ *lowmem_stack = NULL;
90686+ }
90687+
90688+out:
90689+ return ret;
90690+}
90691+
90692+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
90693+{
90694+ unmap_process_stacks(tsk);
90695+}
90696+#else
90697+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
90698+ int node, void **lowmem_stack)
90699+{
90700+ return alloc_thread_info_node(tsk, node);
90701+}
90702+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
90703+{
90704+ free_thread_info(ti);
90705+}
90706+#endif
90707+
90708 /* SLAB cache for signal_struct structures (tsk->signal) */
90709 static struct kmem_cache *signal_cachep;
90710
90711@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
90712 /* SLAB cache for mm_struct structures (tsk->mm) */
90713 static struct kmem_cache *mm_cachep;
90714
90715-static void account_kernel_stack(struct thread_info *ti, int account)
90716+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
90717 {
90718+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90719+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
90720+#else
90721 struct zone *zone = page_zone(virt_to_page(ti));
90722+#endif
90723
90724 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
90725 }
90726
90727 void free_task(struct task_struct *tsk)
90728 {
90729- account_kernel_stack(tsk->stack, -1);
90730+ account_kernel_stack(tsk, tsk->stack, -1);
90731 arch_release_thread_info(tsk->stack);
90732- free_thread_info(tsk->stack);
90733+ gr_free_thread_info(tsk, tsk->stack);
90734 rt_mutex_debug_task_free(tsk);
90735 ftrace_graph_exit_task(tsk);
90736 put_seccomp_filter(tsk);
90737@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90738 {
90739 struct task_struct *tsk;
90740 struct thread_info *ti;
90741+ void *lowmem_stack;
90742 int node = tsk_fork_get_node(orig);
90743 int err;
90744
90745@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90746 if (!tsk)
90747 return NULL;
90748
90749- ti = alloc_thread_info_node(tsk, node);
90750+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
90751 if (!ti)
90752 goto free_tsk;
90753
90754@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90755 goto free_ti;
90756
90757 tsk->stack = ti;
90758+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90759+ tsk->lowmem_stack = lowmem_stack;
90760+#endif
90761 #ifdef CONFIG_SECCOMP
90762 /*
90763 * We must handle setting up seccomp filters once we're under
90764@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90765 set_task_stack_end_magic(tsk);
90766
90767 #ifdef CONFIG_CC_STACKPROTECTOR
90768- tsk->stack_canary = get_random_int();
90769+ tsk->stack_canary = pax_get_random_long();
90770 #endif
90771
90772 /*
90773@@ -352,24 +402,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90774 tsk->splice_pipe = NULL;
90775 tsk->task_frag.page = NULL;
90776
90777- account_kernel_stack(ti, 1);
90778+ account_kernel_stack(tsk, ti, 1);
90779
90780 return tsk;
90781
90782 free_ti:
90783- free_thread_info(ti);
90784+ gr_free_thread_info(tsk, ti);
90785 free_tsk:
90786 free_task_struct(tsk);
90787 return NULL;
90788 }
90789
90790 #ifdef CONFIG_MMU
90791-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90792+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
90793+{
90794+ struct vm_area_struct *tmp;
90795+ unsigned long charge;
90796+ struct file *file;
90797+ int retval;
90798+
90799+ charge = 0;
90800+ if (mpnt->vm_flags & VM_ACCOUNT) {
90801+ unsigned long len = vma_pages(mpnt);
90802+
90803+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90804+ goto fail_nomem;
90805+ charge = len;
90806+ }
90807+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90808+ if (!tmp)
90809+ goto fail_nomem;
90810+ *tmp = *mpnt;
90811+ tmp->vm_mm = mm;
90812+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
90813+ retval = vma_dup_policy(mpnt, tmp);
90814+ if (retval)
90815+ goto fail_nomem_policy;
90816+ if (anon_vma_fork(tmp, mpnt))
90817+ goto fail_nomem_anon_vma_fork;
90818+ tmp->vm_flags &= ~VM_LOCKED;
90819+ tmp->vm_next = tmp->vm_prev = NULL;
90820+ tmp->vm_mirror = NULL;
90821+ file = tmp->vm_file;
90822+ if (file) {
90823+ struct inode *inode = file_inode(file);
90824+ struct address_space *mapping = file->f_mapping;
90825+
90826+ get_file(file);
90827+ if (tmp->vm_flags & VM_DENYWRITE)
90828+ atomic_dec(&inode->i_writecount);
90829+ i_mmap_lock_write(mapping);
90830+ if (tmp->vm_flags & VM_SHARED)
90831+ atomic_inc(&mapping->i_mmap_writable);
90832+ flush_dcache_mmap_lock(mapping);
90833+ /* insert tmp into the share list, just after mpnt */
90834+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
90835+ flush_dcache_mmap_unlock(mapping);
90836+ i_mmap_unlock_write(mapping);
90837+ }
90838+
90839+ /*
90840+ * Clear hugetlb-related page reserves for children. This only
90841+ * affects MAP_PRIVATE mappings. Faults generated by the child
90842+ * are not guaranteed to succeed, even if read-only
90843+ */
90844+ if (is_vm_hugetlb_page(tmp))
90845+ reset_vma_resv_huge_pages(tmp);
90846+
90847+ return tmp;
90848+
90849+fail_nomem_anon_vma_fork:
90850+ mpol_put(vma_policy(tmp));
90851+fail_nomem_policy:
90852+ kmem_cache_free(vm_area_cachep, tmp);
90853+fail_nomem:
90854+ vm_unacct_memory(charge);
90855+ return NULL;
90856+}
90857+
90858+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90859 {
90860 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
90861 struct rb_node **rb_link, *rb_parent;
90862 int retval;
90863- unsigned long charge;
90864
90865 uprobe_start_dup_mmap();
90866 down_write(&oldmm->mmap_sem);
90867@@ -397,51 +512,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90868
90869 prev = NULL;
90870 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
90871- struct file *file;
90872-
90873 if (mpnt->vm_flags & VM_DONTCOPY) {
90874 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
90875 -vma_pages(mpnt));
90876 continue;
90877 }
90878- charge = 0;
90879- if (mpnt->vm_flags & VM_ACCOUNT) {
90880- unsigned long len = vma_pages(mpnt);
90881-
90882- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90883- goto fail_nomem;
90884- charge = len;
90885- }
90886- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90887- if (!tmp)
90888- goto fail_nomem;
90889- *tmp = *mpnt;
90890- INIT_LIST_HEAD(&tmp->anon_vma_chain);
90891- retval = vma_dup_policy(mpnt, tmp);
90892- if (retval)
90893- goto fail_nomem_policy;
90894- tmp->vm_mm = mm;
90895- if (anon_vma_fork(tmp, mpnt))
90896- goto fail_nomem_anon_vma_fork;
90897- tmp->vm_flags &= ~VM_LOCKED;
90898- tmp->vm_next = tmp->vm_prev = NULL;
90899- file = tmp->vm_file;
90900- if (file) {
90901- struct inode *inode = file_inode(file);
90902- struct address_space *mapping = file->f_mapping;
90903-
90904- get_file(file);
90905- if (tmp->vm_flags & VM_DENYWRITE)
90906- atomic_dec(&inode->i_writecount);
90907- i_mmap_lock_write(mapping);
90908- if (tmp->vm_flags & VM_SHARED)
90909- atomic_inc(&mapping->i_mmap_writable);
90910- flush_dcache_mmap_lock(mapping);
90911- /* insert tmp into the share list, just after mpnt */
90912- vma_interval_tree_insert_after(tmp, mpnt,
90913- &mapping->i_mmap);
90914- flush_dcache_mmap_unlock(mapping);
90915- i_mmap_unlock_write(mapping);
90916+ tmp = dup_vma(mm, oldmm, mpnt);
90917+ if (!tmp) {
90918+ retval = -ENOMEM;
90919+ goto out;
90920 }
90921
90922 /*
90923@@ -473,6 +552,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90924 if (retval)
90925 goto out;
90926 }
90927+
90928+#ifdef CONFIG_PAX_SEGMEXEC
90929+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
90930+ struct vm_area_struct *mpnt_m;
90931+
90932+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
90933+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
90934+
90935+ if (!mpnt->vm_mirror)
90936+ continue;
90937+
90938+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
90939+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
90940+ mpnt->vm_mirror = mpnt_m;
90941+ } else {
90942+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
90943+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
90944+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
90945+ mpnt->vm_mirror->vm_mirror = mpnt;
90946+ }
90947+ }
90948+ BUG_ON(mpnt_m);
90949+ }
90950+#endif
90951+
90952 /* a new mm has just been created */
90953 arch_dup_mmap(oldmm, mm);
90954 retval = 0;
90955@@ -482,14 +586,6 @@ out:
90956 up_write(&oldmm->mmap_sem);
90957 uprobe_end_dup_mmap();
90958 return retval;
90959-fail_nomem_anon_vma_fork:
90960- mpol_put(vma_policy(tmp));
90961-fail_nomem_policy:
90962- kmem_cache_free(vm_area_cachep, tmp);
90963-fail_nomem:
90964- retval = -ENOMEM;
90965- vm_unacct_memory(charge);
90966- goto out;
90967 }
90968
90969 static inline int mm_alloc_pgd(struct mm_struct *mm)
90970@@ -739,8 +835,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
90971 return ERR_PTR(err);
90972
90973 mm = get_task_mm(task);
90974- if (mm && mm != current->mm &&
90975- !ptrace_may_access(task, mode)) {
90976+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
90977+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
90978 mmput(mm);
90979 mm = ERR_PTR(-EACCES);
90980 }
90981@@ -943,13 +1039,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
90982 spin_unlock(&fs->lock);
90983 return -EAGAIN;
90984 }
90985- fs->users++;
90986+ atomic_inc(&fs->users);
90987 spin_unlock(&fs->lock);
90988 return 0;
90989 }
90990 tsk->fs = copy_fs_struct(fs);
90991 if (!tsk->fs)
90992 return -ENOMEM;
90993+ /* Carry through gr_chroot_dentry and is_chrooted instead
90994+ of recomputing it here. Already copied when the task struct
90995+ is duplicated. This allows pivot_root to not be treated as
90996+ a chroot
90997+ */
90998+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
90999+
91000 return 0;
91001 }
91002
91003@@ -1187,7 +1290,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
91004 * parts of the process environment (as per the clone
91005 * flags). The actual kick-off is left to the caller.
91006 */
91007-static struct task_struct *copy_process(unsigned long clone_flags,
91008+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
91009 unsigned long stack_start,
91010 unsigned long stack_size,
91011 int __user *child_tidptr,
91012@@ -1258,6 +1361,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91013 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91014 #endif
91015 retval = -EAGAIN;
91016+
91017+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91018+
91019 if (atomic_read(&p->real_cred->user->processes) >=
91020 task_rlimit(p, RLIMIT_NPROC)) {
91021 if (p->real_cred->user != INIT_USER &&
91022@@ -1507,6 +1613,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91023 goto bad_fork_free_pid;
91024 }
91025
91026+ /* synchronizes with gr_set_acls()
91027+ we need to call this past the point of no return for fork()
91028+ */
91029+ gr_copy_label(p);
91030+
91031 if (likely(p->pid)) {
91032 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
91033
91034@@ -1597,6 +1708,8 @@ bad_fork_cleanup_count:
91035 bad_fork_free:
91036 free_task(p);
91037 fork_out:
91038+ gr_log_forkfail(retval);
91039+
91040 return ERR_PTR(retval);
91041 }
91042
91043@@ -1658,6 +1771,7 @@ long do_fork(unsigned long clone_flags,
91044
91045 p = copy_process(clone_flags, stack_start, stack_size,
91046 child_tidptr, NULL, trace);
91047+ add_latent_entropy();
91048 /*
91049 * Do this prior waking up the new thread - the thread pointer
91050 * might get invalid after that point, if the thread exits quickly.
91051@@ -1674,6 +1788,8 @@ long do_fork(unsigned long clone_flags,
91052 if (clone_flags & CLONE_PARENT_SETTID)
91053 put_user(nr, parent_tidptr);
91054
91055+ gr_handle_brute_check();
91056+
91057 if (clone_flags & CLONE_VFORK) {
91058 p->vfork_done = &vfork;
91059 init_completion(&vfork);
91060@@ -1792,7 +1908,7 @@ void __init proc_caches_init(void)
91061 mm_cachep = kmem_cache_create("mm_struct",
91062 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
91063 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
91064- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
91065+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
91066 mmap_init();
91067 nsproxy_cache_init();
91068 }
91069@@ -1832,7 +1948,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91070 return 0;
91071
91072 /* don't need lock here; in the worst case we'll do useless copy */
91073- if (fs->users == 1)
91074+ if (atomic_read(&fs->users) == 1)
91075 return 0;
91076
91077 *new_fsp = copy_fs_struct(fs);
91078@@ -1944,7 +2060,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91079 fs = current->fs;
91080 spin_lock(&fs->lock);
91081 current->fs = new_fs;
91082- if (--fs->users)
91083+ gr_set_chroot_entries(current, &current->fs->root);
91084+ if (atomic_dec_return(&fs->users))
91085 new_fs = NULL;
91086 else
91087 new_fs = fs;
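
kernel/fork.c collects several independent changes. GRKERNSEC_KSTACKOVERFLOW still allocates the stack pages normally but re-maps them through vmap() (VM_IOREMAP is used only to obtain THREAD_SIZE alignment), so the stack the task actually runs on lives in virtually mapped space where allocations are separated by unmapped guard pages and an overflow faults immediately instead of silently corrupting a neighbour; upstream later shipped the same idea as CONFIG_VMAP_STACK. The dup_mmap() body is refactored into a dup_vma() helper, which also simplifies re-linking SEGMEXEC mirror VMAs after fork, fs_struct's users count becomes a true atomic_t, and the stack canary is drawn from PaX's RNG. A userspace sketch of why virtually mapped stacks catch overflows, using an mprotect()ed page as the guard:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t sz = 4 * 4096;
        char *stk = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (stk == MAP_FAILED) { perror("mmap"); return 1; }
        /* revoke the lowest page to emulate the guard below the stack */
        if (mprotect(stk, 4096, PROT_NONE)) { perror("mprotect"); return 1; }
        memset(stk + 4096, 0, sz - 4096);   /* in-bounds use is fine */
        printf("in-bounds writes succeed; touching the guard would SIGSEGV\n");
        /* stk[0] = 1;  <- overflowing into the guard faults immediately */
        munmap(stk, sz);
        return 0;
    }
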
91088diff --git a/kernel/futex.c b/kernel/futex.c
91089index 2a5e383..878bac6 100644
91090--- a/kernel/futex.c
91091+++ b/kernel/futex.c
91092@@ -201,7 +201,7 @@ struct futex_pi_state {
91093 atomic_t refcount;
91094
91095 union futex_key key;
91096-};
91097+} __randomize_layout;
91098
91099 /**
91100 * struct futex_q - The hashed futex queue entry, one per waiting task
91101@@ -235,7 +235,7 @@ struct futex_q {
91102 struct rt_mutex_waiter *rt_waiter;
91103 union futex_key *requeue_pi_key;
91104 u32 bitset;
91105-};
91106+} __randomize_layout;
91107
91108 static const struct futex_q futex_q_init = {
91109 /* list gets initialized in queue_me()*/
91110@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91111 struct page *page, *page_head;
91112 int err, ro = 0;
91113
91114+#ifdef CONFIG_PAX_SEGMEXEC
91115+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91116+ return -EFAULT;
91117+#endif
91118+
91119 /*
91120 * The futex address must be "naturally" aligned.
91121 */
91122@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
91123
91124 static int get_futex_value_locked(u32 *dest, u32 __user *from)
91125 {
91126- int ret;
91127+ unsigned long ret;
91128
91129 pagefault_disable();
91130 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
91131@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
91132 {
91133 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
91134 u32 curval;
91135+ mm_segment_t oldfs;
91136
91137 /*
91138 * This will fail and we want it. Some arch implementations do
91139@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
91140 * implementation, the non-functional ones will return
91141 * -ENOSYS.
91142 */
91143+ oldfs = get_fs();
91144+ set_fs(USER_DS);
91145 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
91146 futex_cmpxchg_enabled = 1;
91147+ set_fs(oldfs);
91148 #endif
91149 }
91150
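
futex_detect_cmpxchg() probes for a working cmpxchg by deliberately faulting on a NULL user pointer; the added set_fs(USER_DS)/set_fs(oldfs) pair makes the intentional fault behave like a real user access under PaX UDEREF, where kernel and user address spaces are strictly separated, instead of misdetecting. The earlier hunks also tag futex_pi_state and futex_q with __randomize_layout and make get_futex_key() reject addresses above SEGMEXEC_TASK_SIZE, and futex_compat.c's futex_uaddr() below picks up an __intentional_overflow(-1) marker for the size_overflow plugin.
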
91151diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91152index 55c8c93..9ba7ad6 100644
91153--- a/kernel/futex_compat.c
91154+++ b/kernel/futex_compat.c
91155@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
91156 return 0;
91157 }
91158
91159-static void __user *futex_uaddr(struct robust_list __user *entry,
91160+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
91161 compat_long_t futex_offset)
91162 {
91163 compat_uptr_t base = ptr_to_compat(entry);
91164diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91165index b358a80..fc25240 100644
91166--- a/kernel/gcov/base.c
91167+++ b/kernel/gcov/base.c
91168@@ -114,11 +114,6 @@ void gcov_enable_events(void)
91169 }
91170
91171 #ifdef CONFIG_MODULES
91172-static inline int within(void *addr, void *start, unsigned long size)
91173-{
91174- return ((addr >= start) && (addr < start + size));
91175-}
91176-
91177 /* Update list and generate events when modules are unloaded. */
91178 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91179 void *data)
91180@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91181
91182 /* Remove entries located in module from linked list. */
91183 while ((info = gcov_info_next(info))) {
91184- if (within(info, mod->module_core, mod->core_size)) {
91185+ if (within_module_core_rw((unsigned long)info, mod)) {
91186 gcov_info_unlink(prev, info);
91187 if (gcov_events_enabled)
91188 gcov_event(GCOV_REMOVE, info);
91189diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
91190index 886d09e..c7ff4e5 100644
91191--- a/kernel/irq/manage.c
91192+++ b/kernel/irq/manage.c
91193@@ -874,7 +874,7 @@ static int irq_thread(void *data)
91194
91195 action_ret = handler_fn(desc, action);
91196 if (action_ret == IRQ_HANDLED)
91197- atomic_inc(&desc->threads_handled);
91198+ atomic_inc_unchecked(&desc->threads_handled);
91199
91200 wake_threads_waitq(desc);
91201 }
91202diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
91203index e2514b0..de3dfe0 100644
91204--- a/kernel/irq/spurious.c
91205+++ b/kernel/irq/spurious.c
91206@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
91207 * count. We just care about the count being
91208 * different than the one we saw before.
91209 */
91210- handled = atomic_read(&desc->threads_handled);
91211+ handled = atomic_read_unchecked(&desc->threads_handled);
91212 handled |= SPURIOUS_DEFERRED;
91213 if (handled != desc->threads_handled_last) {
91214 action_ret = IRQ_HANDLED;
91215diff --git a/kernel/jump_label.c b/kernel/jump_label.c
91216index 9019f15..9a3c42e 100644
91217--- a/kernel/jump_label.c
91218+++ b/kernel/jump_label.c
91219@@ -14,6 +14,7 @@
91220 #include <linux/err.h>
91221 #include <linux/static_key.h>
91222 #include <linux/jump_label_ratelimit.h>
91223+#include <linux/mm.h>
91224
91225 #ifdef HAVE_JUMP_LABEL
91226
91227@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
91228
91229 size = (((unsigned long)stop - (unsigned long)start)
91230 / sizeof(struct jump_entry));
91231+ pax_open_kernel();
91232 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
91233+ pax_close_kernel();
91234 }
91235
91236 static void jump_label_update(struct static_key *key, int enable);
91237@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
91238 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
91239 struct jump_entry *iter;
91240
91241+ pax_open_kernel();
91242 for (iter = iter_start; iter < iter_stop; iter++) {
91243 if (within_module_init(iter->code, mod))
91244 iter->code = 0;
91245 }
91246+ pax_close_kernel();
91247 }
91248
91249 static int
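
Under KERNEXEC the jump-entry table sits in read-only kernel memory, so both the boot-time sort and the init-entry invalidation above must run inside a pax_open_kernel()/pax_close_kernel() write window. A userspace analogue with mprotect() (purely illustrative: the kernel primitive briefly lifts write protection, e.g. CR0.WP on x86, rather than remapping pages):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Userspace stand-ins for pax_open_kernel()/pax_close_kernel(). */
static void open_window(void *p, size_t len)
{
	mprotect(p, len, PROT_READ | PROT_WRITE);
}

static void close_window(void *p, size_t len)
{
	mprotect(p, len, PROT_READ);
}

int main(void)
{
	size_t len = (size_t)sysconf(_SC_PAGESIZE);
	char *tbl = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (tbl == MAP_FAILED)
		return 1;
	strcpy(tbl, "unsorted");
	close_window(tbl, len);	/* steady state: read-only */

	open_window(tbl, len);	/* pax_open_kernel() analogue */
	tbl[0] = 'U';		/* the sort/invalidate happens here */
	close_window(tbl, len);	/* pax_close_kernel() analogue */

	puts(tbl);
	return 0;
}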
91250diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91251index 5c5987f..bc502b0 100644
91252--- a/kernel/kallsyms.c
91253+++ b/kernel/kallsyms.c
91254@@ -11,6 +11,9 @@
91255 * Changed the compression method from stem compression to "table lookup"
91256 * compression (see scripts/kallsyms.c for a more complete description)
91257 */
91258+#ifdef CONFIG_GRKERNSEC_HIDESYM
91259+#define __INCLUDED_BY_HIDESYM 1
91260+#endif
91261 #include <linux/kallsyms.h>
91262 #include <linux/module.h>
91263 #include <linux/init.h>
91264@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
91265
91266 static inline int is_kernel_inittext(unsigned long addr)
91267 {
91268+ if (system_state != SYSTEM_BOOTING)
91269+ return 0;
91270+
91271 if (addr >= (unsigned long)_sinittext
91272 && addr <= (unsigned long)_einittext)
91273 return 1;
91274 return 0;
91275 }
91276
91277+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91278+#ifdef CONFIG_MODULES
91279+static inline int is_module_text(unsigned long addr)
91280+{
91281+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91282+ return 1;
91283+
91284+ addr = ktla_ktva(addr);
91285+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91286+}
91287+#else
91288+static inline int is_module_text(unsigned long addr)
91289+{
91290+ return 0;
91291+}
91292+#endif
91293+#endif
91294+
91295 static inline int is_kernel_text(unsigned long addr)
91296 {
91297 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91298@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
91299
91300 static inline int is_kernel(unsigned long addr)
91301 {
91302+
91303+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91304+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91305+ return 1;
91306+
91307+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91308+#else
91309 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91310+#endif
91311+
91312 return 1;
91313 return in_gate_area_no_mm(addr);
91314 }
91315
91316 static int is_ksym_addr(unsigned long addr)
91317 {
91318+
91319+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91320+ if (is_module_text(addr))
91321+ return 0;
91322+#endif
91323+
91324 if (all_var)
91325 return is_kernel(addr);
91326
91327@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91328
91329 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91330 {
91331- iter->name[0] = '\0';
91332 iter->nameoff = get_symbol_offset(new_pos);
91333 iter->pos = new_pos;
91334 }
91335@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
91336 {
91337 struct kallsym_iter *iter = m->private;
91338
91339+#ifdef CONFIG_GRKERNSEC_HIDESYM
91340+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
91341+ return 0;
91342+#endif
91343+
91344 /* Some debugging symbols have no name. Ignore them. */
91345 if (!iter->name[0])
91346 return 0;
91347@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
91348 */
91349 type = iter->exported ? toupper(iter->type) :
91350 tolower(iter->type);
91351+
91352 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
91353 type, iter->name, iter->module_name);
91354 } else
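
With GRKERNSEC_HIDESYM, /proc/kallsyms stays openable but turns empty for non-root: s_show() returns 0 — success with no output — instead of an error, so parsers keep working while learning nothing, and %pK independently zeroes printed addresses according to kptr_restrict. A userspace sketch of the silently-empty policy:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch of the HIDESYM behaviour in s_show(): unprivileged
 * readers get no records, but also no error. */
static int show_symbol(FILE *m, unsigned long value, char type,
		       const char *name, bool privileged)
{
	if (!privileged)
		return 0;	/* emit nothing, report success */

	fprintf(m, "%016lx %c %s\n", value, type, name);
	return 0;
}

int main(void)
{
	bool privileged = (geteuid() == 0);

	show_symbol(stdout, 0xffffffff81000000UL, 'T', "_stext", privileged);
	return 0;
}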
91355diff --git a/kernel/kcmp.c b/kernel/kcmp.c
91356index 0aa69ea..a7fcafb 100644
91357--- a/kernel/kcmp.c
91358+++ b/kernel/kcmp.c
91359@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
91360 struct task_struct *task1, *task2;
91361 int ret;
91362
91363+#ifdef CONFIG_GRKERNSEC
91364+ return -ENOSYS;
91365+#endif
91366+
91367 rcu_read_lock();
91368
91369 /*
91370diff --git a/kernel/kexec.c b/kernel/kexec.c
91371index 38c25b1..12b3f69 100644
91372--- a/kernel/kexec.c
91373+++ b/kernel/kexec.c
91374@@ -1348,7 +1348,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
91375 compat_ulong_t, flags)
91376 {
91377 struct compat_kexec_segment in;
91378- struct kexec_segment out, __user *ksegments;
91379+ struct kexec_segment out;
91380+ struct kexec_segment __user *ksegments;
91381 unsigned long i, result;
91382
91383 /* Don't allow clients that don't understand the native
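
The kexec hunk changes no behaviour. In `struct kexec_segment out, __user *ksegments;` the sparse __user address-space annotation attaches only to the second declarator, which is easy to misread; splitting the declaration makes the annotation unambiguous, by the same reasoning as the classic pointer-declarator pitfall:

/* The derived type belongs to each declarator, not to the line: */
int a, *p;	/* a is an int; only p is a pointer to int */

/* Split declarations keep per-declarator annotations unambiguous: */
int b;
int *q;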
91384diff --git a/kernel/kmod.c b/kernel/kmod.c
91385index 2777f40..a689506 100644
91386--- a/kernel/kmod.c
91387+++ b/kernel/kmod.c
91388@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
91389 kfree(info->argv);
91390 }
91391
91392-static int call_modprobe(char *module_name, int wait)
91393+static int call_modprobe(char *module_name, char *module_param, int wait)
91394 {
91395 struct subprocess_info *info;
91396 static char *envp[] = {
91397@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
91398 NULL
91399 };
91400
91401- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
91402+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
91403 if (!argv)
91404 goto out;
91405
91406@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
91407 argv[1] = "-q";
91408 argv[2] = "--";
91409 argv[3] = module_name; /* check free_modprobe_argv() */
91410- argv[4] = NULL;
91411+ argv[4] = module_param;
91412+ argv[5] = NULL;
91413
91414 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
91415 NULL, free_modprobe_argv, NULL);
91416@@ -122,9 +123,8 @@ out:
91417 * If module auto-loading support is disabled then this function
91418 * becomes a no-operation.
91419 */
91420-int __request_module(bool wait, const char *fmt, ...)
91421+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91422 {
91423- va_list args;
91424 char module_name[MODULE_NAME_LEN];
91425 unsigned int max_modprobes;
91426 int ret;
91427@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
91428 if (!modprobe_path[0])
91429 return 0;
91430
91431- va_start(args, fmt);
91432- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91433- va_end(args);
91434+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91435 if (ret >= MODULE_NAME_LEN)
91436 return -ENAMETOOLONG;
91437
91438@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
91439 if (ret)
91440 return ret;
91441
91442+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91443+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91444+	/* hack to work around consolekit/udisks stupidity */
91445+ read_lock(&tasklist_lock);
91446+ if (!strcmp(current->comm, "mount") &&
91447+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91448+ read_unlock(&tasklist_lock);
91449+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91450+ return -EPERM;
91451+ }
91452+ read_unlock(&tasklist_lock);
91453+ }
91454+#endif
91455+
91456 /* If modprobe needs a service that is in a module, we get a recursive
91457 * loop. Limit the number of running kmod threads to max_threads/2 or
91458 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91459@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
91460
91461 trace_module_request(module_name, wait, _RET_IP_);
91462
91463- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91464+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
91465
91466 atomic_dec(&kmod_concurrent);
91467 return ret;
91468 }
91469+
91470+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91471+{
91472+ va_list args;
91473+ int ret;
91474+
91475+ va_start(args, fmt);
91476+ ret = ____request_module(wait, module_param, fmt, args);
91477+ va_end(args);
91478+
91479+ return ret;
91480+}
91481+
91482+int __request_module(bool wait, const char *fmt, ...)
91483+{
91484+ va_list args;
91485+ int ret;
91486+
91487+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91488+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
91489+ char module_param[MODULE_NAME_LEN];
91490+
91491+ memset(module_param, 0, sizeof(module_param));
91492+
91493+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
91494+
91495+ va_start(args, fmt);
91496+ ret = ____request_module(wait, module_param, fmt, args);
91497+ va_end(args);
91498+
91499+ return ret;
91500+ }
91501+#endif
91502+
91503+ va_start(args, fmt);
91504+ ret = ____request_module(wait, NULL, fmt, args);
91505+ va_end(args);
91506+
91507+ return ret;
91508+}
91509+
91510 EXPORT_SYMBOL(__request_module);
91511 #endif /* CONFIG_MODULES */
91512
91513 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
91514 {
91515+#ifdef CONFIG_GRKERNSEC
91516+ kfree(info->path);
91517+ info->path = info->origpath;
91518+#endif
91519 if (info->cleanup)
91520 (*info->cleanup)(info);
91521 kfree(info);
91522@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
91523 */
91524 set_user_nice(current, 0);
91525
91526+#ifdef CONFIG_GRKERNSEC
91527+	/* this is race-free as far as userland is concerned because we copied
91528+ out the path to be used prior to this point and are now operating
91529+ on that copy
91530+ */
91531+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
91532+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
91533+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
91534+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
91535+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
91536+ retval = -EPERM;
91537+ goto out;
91538+ }
91539+#endif
91540+
91541 retval = -ENOMEM;
91542 new = prepare_kernel_cred(current);
91543 if (!new)
91544@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
91545 commit_creds(new);
91546
91547 retval = do_execve(getname_kernel(sub_info->path),
91548- (const char __user *const __user *)sub_info->argv,
91549- (const char __user *const __user *)sub_info->envp);
91550+ (const char __user *const __force_user *)sub_info->argv,
91551+ (const char __user *const __force_user *)sub_info->envp);
91552 out:
91553 sub_info->retval = retval;
91554 	/* wait_for_helper() will call umh_complete if UMH_WAIT_PROC. */
91555@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
91556 *
91557 * Thus the __user pointer cast is valid here.
91558 */
91559- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91560+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91561
91562 /*
91563 * If ret is 0, either ____call_usermodehelper failed and the
91564@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
91565 goto out;
91566
91567 INIT_WORK(&sub_info->work, __call_usermodehelper);
91568+#ifdef CONFIG_GRKERNSEC
91569+ sub_info->origpath = path;
91570+ sub_info->path = kstrdup(path, gfp_mask);
91571+#else
91572 sub_info->path = path;
91573+#endif
91574 sub_info->argv = argv;
91575 sub_info->envp = envp;
91576
91577@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
91578 static int proc_cap_handler(struct ctl_table *table, int write,
91579 void __user *buffer, size_t *lenp, loff_t *ppos)
91580 {
91581- struct ctl_table t;
91582+ ctl_table_no_const t;
91583 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
91584 kernel_cap_t new_cap;
91585 int err, i;
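
The usermode-helper check added above is a prefix allowlist over sub_info->path plus a blanket reject of any path containing "..", and the kstrdup() in call_usermodehelper_setup() makes it TOCTOU-safe: the kernel validates its private copy, not memory userland could still modify. A standalone sketch of the same policy (the prefix list mirrors the hunk; everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same policy as the ____call_usermodehelper() hunk: the helper
 * binary must live under a trusted prefix (or be the single apport
 * exception) and must not contain "..". */
static bool helper_path_permitted(const char *path)
{
	static const char *prefixes[] = {
		"/sbin/", "/usr/lib/", "/lib/", "/lib64/",
		"/usr/libexec/", "/usr/bin/",
	};
	size_t i;

	if (strstr(path, ".."))
		return false;
	if (!strcmp(path, "/usr/share/apport/apport"))
		return true;
	for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++)
		if (!strncmp(path, prefixes[i], strlen(prefixes[i])))
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", helper_path_permitted("/sbin/modprobe"));	/* 1 */
	printf("%d\n", helper_path_permitted("/tmp/../sbin/x"));	/* 0 */
	return 0;
}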
91586diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91587index c90e417..e6c515d 100644
91588--- a/kernel/kprobes.c
91589+++ b/kernel/kprobes.c
91590@@ -31,6 +31,9 @@
91591 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
91592 * <prasanna@in.ibm.com> added function-return probes.
91593 */
91594+#ifdef CONFIG_GRKERNSEC_HIDESYM
91595+#define __INCLUDED_BY_HIDESYM 1
91596+#endif
91597 #include <linux/kprobes.h>
91598 #include <linux/hash.h>
91599 #include <linux/init.h>
91600@@ -122,12 +125,12 @@ enum kprobe_slot_state {
91601
91602 static void *alloc_insn_page(void)
91603 {
91604- return module_alloc(PAGE_SIZE);
91605+ return module_alloc_exec(PAGE_SIZE);
91606 }
91607
91608 static void free_insn_page(void *page)
91609 {
91610- module_memfree(page);
91611+ module_memfree_exec(page);
91612 }
91613
91614 struct kprobe_insn_cache kprobe_insn_slots = {
91615@@ -2198,11 +2201,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
91616 kprobe_type = "k";
91617
91618 if (sym)
91619- seq_printf(pi, "%p %s %s+0x%x %s ",
91620+ seq_printf(pi, "%pK %s %s+0x%x %s ",
91621 p->addr, kprobe_type, sym, offset,
91622 (modname ? modname : " "));
91623 else
91624- seq_printf(pi, "%p %s %p ",
91625+ seq_printf(pi, "%pK %s %pK ",
91626 p->addr, kprobe_type, p->addr);
91627
91628 if (!pp)
91629diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
91630index 6683cce..daf8999 100644
91631--- a/kernel/ksysfs.c
91632+++ b/kernel/ksysfs.c
91633@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
91634 {
91635 if (count+1 > UEVENT_HELPER_PATH_LEN)
91636 return -ENOENT;
91637+ if (!capable(CAP_SYS_ADMIN))
91638+ return -EPERM;
91639 memcpy(uevent_helper, buf, count);
91640 uevent_helper[count] = '\0';
91641 if (count && uevent_helper[count-1] == '\n')
91642@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
91643 return count;
91644 }
91645
91646-static struct bin_attribute notes_attr = {
91647+static bin_attribute_no_const notes_attr __read_only = {
91648 .attr = {
91649 .name = "notes",
91650 .mode = S_IRUGO,
91651diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
91652index ba77ab5..d6a3e20 100644
91653--- a/kernel/locking/lockdep.c
91654+++ b/kernel/locking/lockdep.c
91655@@ -599,6 +599,10 @@ static int static_obj(void *obj)
91656 end = (unsigned long) &_end,
91657 addr = (unsigned long) obj;
91658
91659+#ifdef CONFIG_PAX_KERNEXEC
91660+ start = ktla_ktva(start);
91661+#endif
91662+
91663 /*
91664 * static variable?
91665 */
91666@@ -743,6 +747,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91667 if (!static_obj(lock->key)) {
91668 debug_locks_off();
91669 printk("INFO: trying to register non-static key.\n");
91670+ printk("lock:%pS key:%pS.\n", lock, lock->key);
91671 printk("the code is fine but needs lockdep annotation.\n");
91672 printk("turning off the locking correctness validator.\n");
91673 dump_stack();
91674@@ -3088,7 +3093,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
91675 if (!class)
91676 return 0;
91677 }
91678- atomic_inc((atomic_t *)&class->ops);
91679+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
91680 if (very_verbose(class)) {
91681 printk("\nacquire class [%p] %s", class->key, class->name);
91682 if (class->name_version > 1)
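
static_obj() compares an object's address against the kernel image bounds, but with KERNEXEC on i386 the text section is reachable at two virtual addresses, so `start` must first be translated with ktla_ktva() ("kernel text linear address to kernel virtual address"). The translation is a constant shift; the offset below is hypothetical, the real one being an arch-specific link-time constant:

/* Sketch of the i386 KERNEXEC text aliasing; the constant is
 * hypothetical, standing in for the patch's link-time offset. */
#define __KERNEL_TEXT_OFFSET_SKETCH 0x01000000UL

#define ktla_ktva_sketch(addr) ((addr) + __KERNEL_TEXT_OFFSET_SKETCH)
#define ktva_ktla_sketch(addr) ((addr) - __KERNEL_TEXT_OFFSET_SKETCH)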
91683diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
91684index ef43ac4..2720dfa 100644
91685--- a/kernel/locking/lockdep_proc.c
91686+++ b/kernel/locking/lockdep_proc.c
91687@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
91688 return 0;
91689 }
91690
91691- seq_printf(m, "%p", class->key);
91692+ seq_printf(m, "%pK", class->key);
91693 #ifdef CONFIG_DEBUG_LOCKDEP
91694 seq_printf(m, " OPS:%8ld", class->ops);
91695 #endif
91696@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
91697
91698 list_for_each_entry(entry, &class->locks_after, entry) {
91699 if (entry->distance == 1) {
91700- seq_printf(m, " -> [%p] ", entry->class->key);
91701+ seq_printf(m, " -> [%pK] ", entry->class->key);
91702 print_name(m, entry->class);
91703 seq_puts(m, "\n");
91704 }
91705@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
91706 if (!class->key)
91707 continue;
91708
91709- seq_printf(m, "[%p] ", class->key);
91710+ seq_printf(m, "[%pK] ", class->key);
91711 print_name(m, class);
91712 seq_puts(m, "\n");
91713 }
91714@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91715 if (!i)
91716 seq_line(m, '-', 40-namelen, namelen);
91717
91718- snprintf(ip, sizeof(ip), "[<%p>]",
91719+ snprintf(ip, sizeof(ip), "[<%pK>]",
91720 (void *)class->contention_point[i]);
91721 seq_printf(m, "%40s %14lu %29s %pS\n",
91722 name, stats->contention_point[i],
91723@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91724 if (!i)
91725 seq_line(m, '-', 40-namelen, namelen);
91726
91727- snprintf(ip, sizeof(ip), "[<%p>]",
91728+ snprintf(ip, sizeof(ip), "[<%pK>]",
91729 (void *)class->contending_point[i]);
91730 seq_printf(m, "%40s %14lu %29s %pS\n",
91731 name, stats->contending_point[i],
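
Every %p to %pK conversion in lockdep_proc.c (and kprobes.c above) has the same purpose: these files leak kernel addresses that defeat KASLR-style randomization, and %pK honours the kptr_restrict sysctl, printing a zeroed value for readers lacking CAP_SYSLOG. A userspace model of that policy:

#include <stdio.h>

/* Model of %pK: kptr_restrict == 1 hides the pointer from readers
 * without CAP_SYSLOG, == 2 hides it from everyone. */
static void print_key(FILE *m, const void *key, int kptr_restrict,
		      int has_cap_syslog)
{
	if (kptr_restrict >= 2 || (kptr_restrict == 1 && !has_cap_syslog))
		key = NULL;
	fprintf(m, "[%p] ", key);
}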
91732diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
91733index d1fe2ba..180cd65e 100644
91734--- a/kernel/locking/mcs_spinlock.h
91735+++ b/kernel/locking/mcs_spinlock.h
91736@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
91737 */
91738 return;
91739 }
91740- ACCESS_ONCE(prev->next) = node;
91741+ ACCESS_ONCE_RW(prev->next) = node;
91742
91743 /* Wait until the lock holder passes the lock down. */
91744 arch_mcs_spin_lock_contended(&node->locked);
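
ACCESS_ONCE_RW exists because this patch set constifies the plain macro: reads still work everywhere, but a store through ACCESS_ONCE() becomes a compile error, forcing every legitimate writer — like the MCS queue-tail handoff here — to opt in visibly. Paraphrased definitions (a sketch of the intent, not the literal patch text):

/* Upstream: one macro for loads and stores. */
#define ACCESS_ONCE_UPSTREAM(x)	(*(volatile __typeof__(x) *)&(x))

/* Hardened split: the default form yields a const-qualified lvalue,
 * so accidental stores fail to compile; writers use the _RW form,
 * which is exactly what these hunks convert. */
#define ACCESS_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))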
91745diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
91746index 3ef3736..9c951fa 100644
91747--- a/kernel/locking/mutex-debug.c
91748+++ b/kernel/locking/mutex-debug.c
91749@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
91750 }
91751
91752 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91753- struct thread_info *ti)
91754+ struct task_struct *task)
91755 {
91756 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
91757
91758 /* Mark the current thread as blocked on the lock: */
91759- ti->task->blocked_on = waiter;
91760+ task->blocked_on = waiter;
91761 }
91762
91763 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91764- struct thread_info *ti)
91765+ struct task_struct *task)
91766 {
91767 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
91768- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
91769- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
91770- ti->task->blocked_on = NULL;
91771+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
91772+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
91773+ task->blocked_on = NULL;
91774
91775 list_del_init(&waiter->list);
91776 waiter->task = NULL;
91777diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
91778index 0799fd3..d06ae3b 100644
91779--- a/kernel/locking/mutex-debug.h
91780+++ b/kernel/locking/mutex-debug.h
91781@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
91782 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
91783 extern void debug_mutex_add_waiter(struct mutex *lock,
91784 struct mutex_waiter *waiter,
91785- struct thread_info *ti);
91786+ struct task_struct *task);
91787 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91788- struct thread_info *ti);
91789+ struct task_struct *task);
91790 extern void debug_mutex_unlock(struct mutex *lock);
91791 extern void debug_mutex_init(struct mutex *lock, const char *name,
91792 struct lock_class_key *key);
91793diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
91794index 94674e5..de4966f 100644
91795--- a/kernel/locking/mutex.c
91796+++ b/kernel/locking/mutex.c
91797@@ -542,7 +542,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
91798 goto skip_wait;
91799
91800 debug_mutex_lock_common(lock, &waiter);
91801- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
91802+ debug_mutex_add_waiter(lock, &waiter, task);
91803
91804 /* add waiting tasks to the end of the waitqueue (FIFO): */
91805 list_add_tail(&waiter.list, &lock->wait_list);
91806@@ -589,7 +589,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
91807 }
91808 __set_task_state(task, TASK_RUNNING);
91809
91810- mutex_remove_waiter(lock, &waiter, current_thread_info());
91811+ mutex_remove_waiter(lock, &waiter, task);
91812 /* set it to 0 if there are no waiters left: */
91813 if (likely(list_empty(&lock->wait_list)))
91814 atomic_set(&lock->count, 0);
91815@@ -610,7 +610,7 @@ skip_wait:
91816 return 0;
91817
91818 err:
91819- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
91820+ mutex_remove_waiter(lock, &waiter, task);
91821 spin_unlock_mutex(&lock->wait_lock, flags);
91822 debug_mutex_free_waiter(&waiter);
91823 mutex_release(&lock->dep_map, 1, ip);
91824diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
91825index c112d00..1946ad9 100644
91826--- a/kernel/locking/osq_lock.c
91827+++ b/kernel/locking/osq_lock.c
91828@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
91829
91830 prev = decode_cpu(old);
91831 node->prev = prev;
91832- ACCESS_ONCE(prev->next) = node;
91833+ ACCESS_ONCE_RW(prev->next) = node;
91834
91835 /*
91836 * Normally @prev is untouchable after the above store; because at that
91837@@ -170,8 +170,8 @@ unqueue:
91838 * it will wait in Step-A.
91839 */
91840
91841- ACCESS_ONCE(next->prev) = prev;
91842- ACCESS_ONCE(prev->next) = next;
91843+ ACCESS_ONCE_RW(next->prev) = prev;
91844+ ACCESS_ONCE_RW(prev->next) = next;
91845
91846 return false;
91847 }
91848@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
91849 node = this_cpu_ptr(&osq_node);
91850 next = xchg(&node->next, NULL);
91851 if (next) {
91852- ACCESS_ONCE(next->locked) = 1;
91853+ ACCESS_ONCE_RW(next->locked) = 1;
91854 return;
91855 }
91856
91857 next = osq_wait_next(lock, node, NULL);
91858 if (next)
91859- ACCESS_ONCE(next->locked) = 1;
91860+ ACCESS_ONCE_RW(next->locked) = 1;
91861 }
91862diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
91863index 1d96dd0..994ff19 100644
91864--- a/kernel/locking/rtmutex-tester.c
91865+++ b/kernel/locking/rtmutex-tester.c
91866@@ -22,7 +22,7 @@
91867 #define MAX_RT_TEST_MUTEXES 8
91868
91869 static spinlock_t rttest_lock;
91870-static atomic_t rttest_event;
91871+static atomic_unchecked_t rttest_event;
91872
91873 struct test_thread_data {
91874 int opcode;
91875@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91876
91877 case RTTEST_LOCKCONT:
91878 td->mutexes[td->opdata] = 1;
91879- td->event = atomic_add_return(1, &rttest_event);
91880+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91881 return 0;
91882
91883 case RTTEST_RESET:
91884@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91885 return 0;
91886
91887 case RTTEST_RESETEVENT:
91888- atomic_set(&rttest_event, 0);
91889+ atomic_set_unchecked(&rttest_event, 0);
91890 return 0;
91891
91892 default:
91893@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91894 return ret;
91895
91896 td->mutexes[id] = 1;
91897- td->event = atomic_add_return(1, &rttest_event);
91898+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91899 rt_mutex_lock(&mutexes[id]);
91900- td->event = atomic_add_return(1, &rttest_event);
91901+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91902 td->mutexes[id] = 4;
91903 return 0;
91904
91905@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91906 return ret;
91907
91908 td->mutexes[id] = 1;
91909- td->event = atomic_add_return(1, &rttest_event);
91910+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91911 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
91912- td->event = atomic_add_return(1, &rttest_event);
91913+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91914 td->mutexes[id] = ret ? 0 : 4;
91915 return ret ? -EINTR : 0;
91916
91917@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91918 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
91919 return ret;
91920
91921- td->event = atomic_add_return(1, &rttest_event);
91922+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91923 rt_mutex_unlock(&mutexes[id]);
91924- td->event = atomic_add_return(1, &rttest_event);
91925+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91926 td->mutexes[id] = 0;
91927 return 0;
91928
91929@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91930 break;
91931
91932 td->mutexes[dat] = 2;
91933- td->event = atomic_add_return(1, &rttest_event);
91934+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91935 break;
91936
91937 default:
91938@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91939 return;
91940
91941 td->mutexes[dat] = 3;
91942- td->event = atomic_add_return(1, &rttest_event);
91943+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91944 break;
91945
91946 case RTTEST_LOCKNOWAIT:
91947@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91948 return;
91949
91950 td->mutexes[dat] = 1;
91951- td->event = atomic_add_return(1, &rttest_event);
91952+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91953 return;
91954
91955 default:
91956diff --git a/kernel/module.c b/kernel/module.c
91957index ec53f59..67d9655 100644
91958--- a/kernel/module.c
91959+++ b/kernel/module.c
91960@@ -59,6 +59,7 @@
91961 #include <linux/jump_label.h>
91962 #include <linux/pfn.h>
91963 #include <linux/bsearch.h>
91964+#include <linux/grsecurity.h>
91965 #include <uapi/linux/module.h>
91966 #include "module-internal.h"
91967
91968@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91969
91970 /* Bounds of module allocation, for speeding __module_address.
91971 * Protected by module_mutex. */
91972-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91973+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91974+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91975
91976 int register_module_notifier(struct notifier_block *nb)
91977 {
91978@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91979 return true;
91980
91981 list_for_each_entry_rcu(mod, &modules, list) {
91982- struct symsearch arr[] = {
91983+ struct symsearch modarr[] = {
91984 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91985 NOT_GPL_ONLY, false },
91986 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91987@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91988 if (mod->state == MODULE_STATE_UNFORMED)
91989 continue;
91990
91991- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91992+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91993 return true;
91994 }
91995 return false;
91996@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
91997 if (!pcpusec->sh_size)
91998 return 0;
91999
92000- if (align > PAGE_SIZE) {
92001+ if (align-1 >= PAGE_SIZE) {
92002 pr_warn("%s: per-cpu alignment %li > %li\n",
92003 mod->name, align, PAGE_SIZE);
92004 align = PAGE_SIZE;
92005@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
92006 static ssize_t show_coresize(struct module_attribute *mattr,
92007 struct module_kobject *mk, char *buffer)
92008 {
92009- return sprintf(buffer, "%u\n", mk->mod->core_size);
92010+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
92011 }
92012
92013 static struct module_attribute modinfo_coresize =
92014@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
92015 static ssize_t show_initsize(struct module_attribute *mattr,
92016 struct module_kobject *mk, char *buffer)
92017 {
92018- return sprintf(buffer, "%u\n", mk->mod->init_size);
92019+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
92020 }
92021
92022 static struct module_attribute modinfo_initsize =
92023@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
92024 goto bad_version;
92025 }
92026
92027+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92028+ /*
92029+	 * avoid potentially printing gibberish on attempted load
92030+ * of a module randomized with a different seed
92031+ */
92032+ pr_warn("no symbol version for %s\n", symname);
92033+#else
92034 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
92035+#endif
92036 return 0;
92037
92038 bad_version:
92039+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92040+ /*
92041+	 * avoid potentially printing gibberish on attempted load
92042+ * of a module randomized with a different seed
92043+ */
92044+ pr_warn("attempted module disagrees about version of symbol %s\n",
92045+ symname);
92046+#else
92047 pr_warn("%s: disagrees about version of symbol %s\n",
92048 mod->name, symname);
92049+#endif
92050 return 0;
92051 }
92052
92053@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod,
92054 */
92055 #ifdef CONFIG_SYSFS
92056
92057-#ifdef CONFIG_KALLSYMS
92058+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
92059 static inline bool sect_empty(const Elf_Shdr *sect)
92060 {
92061 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
92062@@ -1419,7 +1438,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
92063 {
92064 unsigned int notes, loaded, i;
92065 struct module_notes_attrs *notes_attrs;
92066- struct bin_attribute *nattr;
92067+ bin_attribute_no_const *nattr;
92068
92069 /* failed to create section attributes, so can't create notes */
92070 if (!mod->sect_attrs)
92071@@ -1531,7 +1550,7 @@ static void del_usage_links(struct module *mod)
92072 static int module_add_modinfo_attrs(struct module *mod)
92073 {
92074 struct module_attribute *attr;
92075- struct module_attribute *temp_attr;
92076+ module_attribute_no_const *temp_attr;
92077 int error = 0;
92078 int i;
92079
92080@@ -1741,21 +1760,21 @@ static void set_section_ro_nx(void *base,
92081
92082 static void unset_module_core_ro_nx(struct module *mod)
92083 {
92084- set_page_attributes(mod->module_core + mod->core_text_size,
92085- mod->module_core + mod->core_size,
92086+ set_page_attributes(mod->module_core_rw,
92087+ mod->module_core_rw + mod->core_size_rw,
92088 set_memory_x);
92089- set_page_attributes(mod->module_core,
92090- mod->module_core + mod->core_ro_size,
92091+ set_page_attributes(mod->module_core_rx,
92092+ mod->module_core_rx + mod->core_size_rx,
92093 set_memory_rw);
92094 }
92095
92096 static void unset_module_init_ro_nx(struct module *mod)
92097 {
92098- set_page_attributes(mod->module_init + mod->init_text_size,
92099- mod->module_init + mod->init_size,
92100+ set_page_attributes(mod->module_init_rw,
92101+ mod->module_init_rw + mod->init_size_rw,
92102 set_memory_x);
92103- set_page_attributes(mod->module_init,
92104- mod->module_init + mod->init_ro_size,
92105+ set_page_attributes(mod->module_init_rx,
92106+ mod->module_init_rx + mod->init_size_rx,
92107 set_memory_rw);
92108 }
92109
92110@@ -1768,14 +1787,14 @@ void set_all_modules_text_rw(void)
92111 list_for_each_entry_rcu(mod, &modules, list) {
92112 if (mod->state == MODULE_STATE_UNFORMED)
92113 continue;
92114- if ((mod->module_core) && (mod->core_text_size)) {
92115- set_page_attributes(mod->module_core,
92116- mod->module_core + mod->core_text_size,
92117+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92118+ set_page_attributes(mod->module_core_rx,
92119+ mod->module_core_rx + mod->core_size_rx,
92120 set_memory_rw);
92121 }
92122- if ((mod->module_init) && (mod->init_text_size)) {
92123- set_page_attributes(mod->module_init,
92124- mod->module_init + mod->init_text_size,
92125+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92126+ set_page_attributes(mod->module_init_rx,
92127+ mod->module_init_rx + mod->init_size_rx,
92128 set_memory_rw);
92129 }
92130 }
92131@@ -1791,14 +1810,14 @@ void set_all_modules_text_ro(void)
92132 list_for_each_entry_rcu(mod, &modules, list) {
92133 if (mod->state == MODULE_STATE_UNFORMED)
92134 continue;
92135- if ((mod->module_core) && (mod->core_text_size)) {
92136- set_page_attributes(mod->module_core,
92137- mod->module_core + mod->core_text_size,
92138+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
92139+ set_page_attributes(mod->module_core_rx,
92140+ mod->module_core_rx + mod->core_size_rx,
92141 set_memory_ro);
92142 }
92143- if ((mod->module_init) && (mod->init_text_size)) {
92144- set_page_attributes(mod->module_init,
92145- mod->module_init + mod->init_text_size,
92146+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
92147+ set_page_attributes(mod->module_init_rx,
92148+ mod->module_init_rx + mod->init_size_rx,
92149 set_memory_ro);
92150 }
92151 }
92152@@ -1807,7 +1826,15 @@ void set_all_modules_text_ro(void)
92153 #else
92154 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
92155 static void unset_module_core_ro_nx(struct module *mod) { }
92156-static void unset_module_init_ro_nx(struct module *mod) { }
92157+static void unset_module_init_ro_nx(struct module *mod)
92158+{
92159+
92160+#ifdef CONFIG_PAX_KERNEXEC
92161+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
92162+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
92163+#endif
92164+
92165+}
92166 #endif
92167
92168 void __weak module_memfree(void *module_region)
92169@@ -1861,16 +1888,19 @@ static void free_module(struct module *mod)
92170 /* This may be NULL, but that's OK */
92171 unset_module_init_ro_nx(mod);
92172 module_arch_freeing_init(mod);
92173- module_memfree(mod->module_init);
92174+ module_memfree(mod->module_init_rw);
92175+ module_memfree_exec(mod->module_init_rx);
92176 kfree(mod->args);
92177 percpu_modfree(mod);
92178
92179 /* Free lock-classes; relies on the preceding sync_rcu(). */
92180- lockdep_free_key_range(mod->module_core, mod->core_size);
92181+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92182+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92183
92184 /* Finally, free the core (containing the module structure) */
92185 unset_module_core_ro_nx(mod);
92186- module_memfree(mod->module_core);
92187+ module_memfree_exec(mod->module_core_rx);
92188+ module_memfree(mod->module_core_rw);
92189
92190 #ifdef CONFIG_MPU
92191 update_protections(current->mm);
92192@@ -1939,9 +1969,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92193 int ret = 0;
92194 const struct kernel_symbol *ksym;
92195
92196+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92197+ int is_fs_load = 0;
92198+ int register_filesystem_found = 0;
92199+ char *p;
92200+
92201+ p = strstr(mod->args, "grsec_modharden_fs");
92202+ if (p) {
92203+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
92204+ /* copy \0 as well */
92205+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
92206+ is_fs_load = 1;
92207+ }
92208+#endif
92209+
92210 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
92211 const char *name = info->strtab + sym[i].st_name;
92212
92213+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92214+ /* it's a real shame this will never get ripped and copied
92215+ upstream! ;(
92216+ */
92217+ if (is_fs_load && !strcmp(name, "register_filesystem"))
92218+ register_filesystem_found = 1;
92219+#endif
92220+
92221 switch (sym[i].st_shndx) {
92222 case SHN_COMMON:
92223 /* Ignore common symbols */
92224@@ -1966,7 +2018,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92225 ksym = resolve_symbol_wait(mod, info, name);
92226 /* Ok if resolved. */
92227 if (ksym && !IS_ERR(ksym)) {
92228+ pax_open_kernel();
92229 sym[i].st_value = ksym->value;
92230+ pax_close_kernel();
92231 break;
92232 }
92233
92234@@ -1985,11 +2039,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
92235 secbase = (unsigned long)mod_percpu(mod);
92236 else
92237 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
92238+ pax_open_kernel();
92239 sym[i].st_value += secbase;
92240+ pax_close_kernel();
92241 break;
92242 }
92243 }
92244
92245+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92246+ if (is_fs_load && !register_filesystem_found) {
92247+		printk(KERN_ALERT "grsec: denied attempt to load non-fs module %.64s through mount\n", mod->name);
92248+ ret = -EPERM;
92249+ }
92250+#endif
92251+
92252 return ret;
92253 }
92254
92255@@ -2073,22 +2136,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
92256 || s->sh_entsize != ~0UL
92257 || strstarts(sname, ".init"))
92258 continue;
92259- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92260+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92261+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92262+ else
92263+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92264 pr_debug("\t%s\n", sname);
92265 }
92266- switch (m) {
92267- case 0: /* executable */
92268- mod->core_size = debug_align(mod->core_size);
92269- mod->core_text_size = mod->core_size;
92270- break;
92271- case 1: /* RO: text and ro-data */
92272- mod->core_size = debug_align(mod->core_size);
92273- mod->core_ro_size = mod->core_size;
92274- break;
92275- case 3: /* whole core */
92276- mod->core_size = debug_align(mod->core_size);
92277- break;
92278- }
92279 }
92280
92281 pr_debug("Init section allocation order:\n");
92282@@ -2102,23 +2155,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
92283 || s->sh_entsize != ~0UL
92284 || !strstarts(sname, ".init"))
92285 continue;
92286- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92287- | INIT_OFFSET_MASK);
92288+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92289+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92290+ else
92291+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92292+ s->sh_entsize |= INIT_OFFSET_MASK;
92293 pr_debug("\t%s\n", sname);
92294 }
92295- switch (m) {
92296- case 0: /* executable */
92297- mod->init_size = debug_align(mod->init_size);
92298- mod->init_text_size = mod->init_size;
92299- break;
92300- case 1: /* RO: text and ro-data */
92301- mod->init_size = debug_align(mod->init_size);
92302- mod->init_ro_size = mod->init_size;
92303- break;
92304- case 3: /* whole init */
92305- mod->init_size = debug_align(mod->init_size);
92306- break;
92307- }
92308 }
92309 }
92310
92311@@ -2291,7 +2334,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92312
92313 /* Put symbol section at end of init part of module. */
92314 symsect->sh_flags |= SHF_ALLOC;
92315- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92316+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92317 info->index.sym) | INIT_OFFSET_MASK;
92318 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
92319
92320@@ -2308,16 +2351,16 @@ static void layout_symtab(struct module *mod, struct load_info *info)
92321 }
92322
92323 /* Append room for core symbols at end of core part. */
92324- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92325- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
92326- mod->core_size += strtab_size;
92327- mod->core_size = debug_align(mod->core_size);
92328+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92329+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
92330+ mod->core_size_rx += strtab_size;
92331+ mod->core_size_rx = debug_align(mod->core_size_rx);
92332
92333 /* Put string table section at end of init part of module. */
92334 strsect->sh_flags |= SHF_ALLOC;
92335- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92336+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92337 info->index.str) | INIT_OFFSET_MASK;
92338- mod->init_size = debug_align(mod->init_size);
92339+ mod->init_size_rx = debug_align(mod->init_size_rx);
92340 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
92341 }
92342
92343@@ -2334,12 +2377,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92344 /* Make sure we get permanent strtab: don't use info->strtab. */
92345 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
92346
92347+ pax_open_kernel();
92348+
92349 /* Set types up while we still have access to sections. */
92350 for (i = 0; i < mod->num_symtab; i++)
92351 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
92352
92353- mod->core_symtab = dst = mod->module_core + info->symoffs;
92354- mod->core_strtab = s = mod->module_core + info->stroffs;
92355+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
92356+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
92357 src = mod->symtab;
92358 for (ndst = i = 0; i < mod->num_symtab; i++) {
92359 if (i == 0 ||
92360@@ -2351,6 +2396,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
92361 }
92362 }
92363 mod->core_num_syms = ndst;
92364+
92365+ pax_close_kernel();
92366 }
92367 #else
92368 static inline void layout_symtab(struct module *mod, struct load_info *info)
92369@@ -2384,17 +2431,33 @@ void * __weak module_alloc(unsigned long size)
92370 return vmalloc_exec(size);
92371 }
92372
92373-static void *module_alloc_update_bounds(unsigned long size)
92374+static void *module_alloc_update_bounds_rw(unsigned long size)
92375 {
92376 void *ret = module_alloc(size);
92377
92378 if (ret) {
92379 mutex_lock(&module_mutex);
92380 /* Update module bounds. */
92381- if ((unsigned long)ret < module_addr_min)
92382- module_addr_min = (unsigned long)ret;
92383- if ((unsigned long)ret + size > module_addr_max)
92384- module_addr_max = (unsigned long)ret + size;
92385+ if ((unsigned long)ret < module_addr_min_rw)
92386+ module_addr_min_rw = (unsigned long)ret;
92387+ if ((unsigned long)ret + size > module_addr_max_rw)
92388+ module_addr_max_rw = (unsigned long)ret + size;
92389+ mutex_unlock(&module_mutex);
92390+ }
92391+ return ret;
92392+}
92393+
92394+static void *module_alloc_update_bounds_rx(unsigned long size)
92395+{
92396+ void *ret = module_alloc_exec(size);
92397+
92398+ if (ret) {
92399+ mutex_lock(&module_mutex);
92400+ /* Update module bounds. */
92401+ if ((unsigned long)ret < module_addr_min_rx)
92402+ module_addr_min_rx = (unsigned long)ret;
92403+ if ((unsigned long)ret + size > module_addr_max_rx)
92404+ module_addr_max_rx = (unsigned long)ret + size;
92405 mutex_unlock(&module_mutex);
92406 }
92407 return ret;
92408@@ -2665,7 +2728,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92409 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
92410
92411 if (info->index.sym == 0) {
92412+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
92413+ /*
92414+	 * avoid potentially printing gibberish on attempted load
92415+ * of a module randomized with a different seed
92416+ */
92417+ pr_warn("module has no symbols (stripped?)\n");
92418+#else
92419 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
92420+#endif
92421 return ERR_PTR(-ENOEXEC);
92422 }
92423
92424@@ -2681,8 +2752,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
92425 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92426 {
92427 const char *modmagic = get_modinfo(info, "vermagic");
92428+ const char *license = get_modinfo(info, "license");
92429 int err;
92430
92431+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92432+ if (!license || !license_is_gpl_compatible(license))
92433+ return -ENOEXEC;
92434+#endif
92435+
92436 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
92437 modmagic = NULL;
92438
92439@@ -2707,7 +2784,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
92440 }
92441
92442 /* Set up license info based on the info section */
92443- set_license(mod, get_modinfo(info, "license"));
92444+ set_license(mod, license);
92445
92446 return 0;
92447 }
92448@@ -2801,7 +2878,7 @@ static int move_module(struct module *mod, struct load_info *info)
92449 void *ptr;
92450
92451 /* Do the allocs. */
92452- ptr = module_alloc_update_bounds(mod->core_size);
92453+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92454 /*
92455 * The pointer to this block is stored in the module structure
92456 * which is inside the block. Just mark it as not being a
92457@@ -2811,11 +2888,11 @@ static int move_module(struct module *mod, struct load_info *info)
92458 if (!ptr)
92459 return -ENOMEM;
92460
92461- memset(ptr, 0, mod->core_size);
92462- mod->module_core = ptr;
92463+ memset(ptr, 0, mod->core_size_rw);
92464+ mod->module_core_rw = ptr;
92465
92466- if (mod->init_size) {
92467- ptr = module_alloc_update_bounds(mod->init_size);
92468+ if (mod->init_size_rw) {
92469+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92470 /*
92471 * The pointer to this block is stored in the module structure
92472 * which is inside the block. This block doesn't need to be
92473@@ -2824,13 +2901,45 @@ static int move_module(struct module *mod, struct load_info *info)
92474 */
92475 kmemleak_ignore(ptr);
92476 if (!ptr) {
92477- module_memfree(mod->module_core);
92478+ module_memfree(mod->module_core_rw);
92479 return -ENOMEM;
92480 }
92481- memset(ptr, 0, mod->init_size);
92482- mod->module_init = ptr;
92483+ memset(ptr, 0, mod->init_size_rw);
92484+ mod->module_init_rw = ptr;
92485 } else
92486- mod->module_init = NULL;
92487+ mod->module_init_rw = NULL;
92488+
92489+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92490+ kmemleak_not_leak(ptr);
92491+ if (!ptr) {
92492+ if (mod->module_init_rw)
92493+ module_memfree(mod->module_init_rw);
92494+ module_memfree(mod->module_core_rw);
92495+ return -ENOMEM;
92496+ }
92497+
92498+ pax_open_kernel();
92499+ memset(ptr, 0, mod->core_size_rx);
92500+ pax_close_kernel();
92501+ mod->module_core_rx = ptr;
92502+
92503+ if (mod->init_size_rx) {
92504+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92505+ kmemleak_ignore(ptr);
92506+ if (!ptr && mod->init_size_rx) {
92507+ module_memfree_exec(mod->module_core_rx);
92508+ if (mod->module_init_rw)
92509+ module_memfree(mod->module_init_rw);
92510+ module_memfree(mod->module_core_rw);
92511+ return -ENOMEM;
92512+ }
92513+
92514+ pax_open_kernel();
92515+ memset(ptr, 0, mod->init_size_rx);
92516+ pax_close_kernel();
92517+ mod->module_init_rx = ptr;
92518+ } else
92519+ mod->module_init_rx = NULL;
92520
92521 /* Transfer each section which specifies SHF_ALLOC */
92522 pr_debug("final section addresses:\n");
92523@@ -2841,16 +2950,45 @@ static int move_module(struct module *mod, struct load_info *info)
92524 if (!(shdr->sh_flags & SHF_ALLOC))
92525 continue;
92526
92527- if (shdr->sh_entsize & INIT_OFFSET_MASK)
92528- dest = mod->module_init
92529- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92530- else
92531- dest = mod->module_core + shdr->sh_entsize;
92532+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
92533+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92534+ dest = mod->module_init_rw
92535+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92536+ else
92537+ dest = mod->module_init_rx
92538+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
92539+ } else {
92540+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
92541+ dest = mod->module_core_rw + shdr->sh_entsize;
92542+ else
92543+ dest = mod->module_core_rx + shdr->sh_entsize;
92544+ }
92545+
92546+ if (shdr->sh_type != SHT_NOBITS) {
92547+
92548+#ifdef CONFIG_PAX_KERNEXEC
92549+#ifdef CONFIG_X86_64
92550+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
92551+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92552+#endif
92553+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
92554+ pax_open_kernel();
92555+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92556+ pax_close_kernel();
92557+ } else
92558+#endif
92559
92560- if (shdr->sh_type != SHT_NOBITS)
92561 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
92562+ }
92563 /* Update sh_addr to point to copy in image. */
92564- shdr->sh_addr = (unsigned long)dest;
92565+
92566+#ifdef CONFIG_PAX_KERNEXEC
92567+ if (shdr->sh_flags & SHF_EXECINSTR)
92568+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
92569+ else
92570+#endif
92571+
92572+ shdr->sh_addr = (unsigned long)dest;
92573 pr_debug("\t0x%lx %s\n",
92574 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
92575 }
92576@@ -2907,12 +3045,12 @@ static void flush_module_icache(const struct module *mod)
92577 * Do it before processing of module parameters, so the module
92578 * can provide parameter accessor functions of its own.
92579 */
92580- if (mod->module_init)
92581- flush_icache_range((unsigned long)mod->module_init,
92582- (unsigned long)mod->module_init
92583- + mod->init_size);
92584- flush_icache_range((unsigned long)mod->module_core,
92585- (unsigned long)mod->module_core + mod->core_size);
92586+ if (mod->module_init_rx)
92587+ flush_icache_range((unsigned long)mod->module_init_rx,
92588+ (unsigned long)mod->module_init_rx
92589+ + mod->init_size_rx);
92590+ flush_icache_range((unsigned long)mod->module_core_rx,
92591+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
92592
92593 set_fs(old_fs);
92594 }
92595@@ -2970,8 +3108,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
92596 {
92597 percpu_modfree(mod);
92598 module_arch_freeing_init(mod);
92599- module_memfree(mod->module_init);
92600- module_memfree(mod->module_core);
92601+ module_memfree_exec(mod->module_init_rx);
92602+ module_memfree_exec(mod->module_core_rx);
92603+ module_memfree(mod->module_init_rw);
92604+ module_memfree(mod->module_core_rw);
92605 }
92606
92607 int __weak module_finalize(const Elf_Ehdr *hdr,
92608@@ -2984,7 +3124,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
92609 static int post_relocation(struct module *mod, const struct load_info *info)
92610 {
92611 /* Sort exception table now relocations are done. */
92612+ pax_open_kernel();
92613 sort_extable(mod->extable, mod->extable + mod->num_exentries);
92614+ pax_close_kernel();
92615
92616 /* Copy relocated percpu area over. */
92617 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
92618@@ -3032,13 +3174,15 @@ static void do_mod_ctors(struct module *mod)
92619 /* For freeing module_init on success, in case kallsyms traversing */
92620 struct mod_initfree {
92621 struct rcu_head rcu;
92622- void *module_init;
92623+ void *module_init_rw;
92624+ void *module_init_rx;
92625 };
92626
92627 static void do_free_init(struct rcu_head *head)
92628 {
92629 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
92630- module_memfree(m->module_init);
92631+ module_memfree(m->module_init_rw);
92632+ module_memfree_exec(m->module_init_rx);
92633 kfree(m);
92634 }
92635
92636@@ -3058,7 +3202,8 @@ static noinline int do_init_module(struct module *mod)
92637 ret = -ENOMEM;
92638 goto fail;
92639 }
92640- freeinit->module_init = mod->module_init;
92641+ freeinit->module_init_rw = mod->module_init_rw;
92642+ freeinit->module_init_rx = mod->module_init_rx;
92643
92644 /*
92645 * We want to find out whether @mod uses async during init. Clear
92646@@ -3117,10 +3262,10 @@ static noinline int do_init_module(struct module *mod)
92647 #endif
92648 unset_module_init_ro_nx(mod);
92649 module_arch_freeing_init(mod);
92650- mod->module_init = NULL;
92651- mod->init_size = 0;
92652- mod->init_ro_size = 0;
92653- mod->init_text_size = 0;
92654+ mod->module_init_rw = NULL;
92655+ mod->module_init_rx = NULL;
92656+ mod->init_size_rw = 0;
92657+ mod->init_size_rx = 0;
92658 /*
92659 * We want to free module_init, but be aware that kallsyms may be
92660 * walking this with preempt disabled. In all the failure paths,
92661@@ -3208,16 +3353,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
92662 module_bug_finalize(info->hdr, info->sechdrs, mod);
92663
92664 /* Set RO and NX regions for core */
92665- set_section_ro_nx(mod->module_core,
92666- mod->core_text_size,
92667- mod->core_ro_size,
92668- mod->core_size);
92669+ set_section_ro_nx(mod->module_core_rx,
92670+ mod->core_size_rx,
92671+ mod->core_size_rx,
92672+ mod->core_size_rx);
92673
92674 /* Set RO and NX regions for init */
92675- set_section_ro_nx(mod->module_init,
92676- mod->init_text_size,
92677- mod->init_ro_size,
92678- mod->init_size);
92679+ set_section_ro_nx(mod->module_init_rx,
92680+ mod->init_size_rx,
92681+ mod->init_size_rx,
92682+ mod->init_size_rx);
92683
92684 /* Mark state as coming so strong_try_module_get() ignores us,
92685 * but kallsyms etc. can see us. */
92686@@ -3301,9 +3446,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
92687 if (err)
92688 goto free_unload;
92689
92690+ /* Now copy in args */
92691+ mod->args = strndup_user(uargs, ~0UL >> 1);
92692+ if (IS_ERR(mod->args)) {
92693+ err = PTR_ERR(mod->args);
92694+ goto free_unload;
92695+ }
92696+
92697 /* Set up MODINFO_ATTR fields */
92698 setup_modinfo(mod, info);
92699
92700+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92701+ {
92702+ char *p, *p2;
92703+
92704+ if (strstr(mod->args, "grsec_modharden_netdev")) {
92705+			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
92706+ err = -EPERM;
92707+ goto free_modinfo;
92708+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
92709+ p += sizeof("grsec_modharden_normal") - 1;
92710+ p2 = strstr(p, "_");
92711+ if (p2) {
92712+ *p2 = '\0';
92713+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
92714+ *p2 = '_';
92715+ }
92716+ err = -EPERM;
92717+ goto free_modinfo;
92718+ }
92719+ }
92720+#endif
92721+
92722 /* Fix up syms, so that st_value is a pointer to location. */
92723 err = simplify_symbols(mod, info);
92724 if (err < 0)
92725@@ -3319,13 +3493,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
92726
92727 flush_module_icache(mod);
92728
92729- /* Now copy in args */
92730- mod->args = strndup_user(uargs, ~0UL >> 1);
92731- if (IS_ERR(mod->args)) {
92732- err = PTR_ERR(mod->args);
92733- goto free_arch_cleanup;
92734- }
92735-
92736 dynamic_debug_setup(info->debug, info->num_debug);
92737
92738 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
92739@@ -3373,11 +3540,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
92740 ddebug_cleanup:
92741 dynamic_debug_remove(info->debug);
92742 synchronize_sched();
92743- kfree(mod->args);
92744- free_arch_cleanup:
92745 module_arch_cleanup(mod);
92746 free_modinfo:
92747 free_modinfo(mod);
92748+ kfree(mod->args);
92749 free_unload:
92750 module_unload_free(mod);
92751 unlink_mod:
92752@@ -3390,7 +3556,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
92753 mutex_unlock(&module_mutex);
92754 free_module:
92755 /* Free lock-classes; relies on the preceding sync_rcu() */
92756- lockdep_free_key_range(mod->module_core, mod->core_size);
92757+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92758+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92759
92760 module_deallocate(mod, info);
92761 free_copy:
92762@@ -3467,10 +3634,16 @@ static const char *get_ksymbol(struct module *mod,
92763 unsigned long nextval;
92764
92765 	/* At worst, next value is at end of module */
92766- if (within_module_init(addr, mod))
92767- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92768+ if (within_module_init_rx(addr, mod))
92769+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92770+ else if (within_module_init_rw(addr, mod))
92771+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92772+ else if (within_module_core_rx(addr, mod))
92773+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92774+ else if (within_module_core_rw(addr, mod))
92775+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92776 else
92777- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92778+ return NULL;
92779
92780 /* Scan for closest preceding symbol, and next symbol. (ELF
92781 starts real symbols at 1). */
92782@@ -3718,7 +3891,7 @@ static int m_show(struct seq_file *m, void *p)
92783 return 0;
92784
92785 seq_printf(m, "%s %u",
92786- mod->name, mod->init_size + mod->core_size);
92787+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92788 print_unload_info(m, mod);
92789
92790 /* Informative for users. */
92791@@ -3727,7 +3900,7 @@ static int m_show(struct seq_file *m, void *p)
92792 mod->state == MODULE_STATE_COMING ? "Loading" :
92793 "Live");
92794 /* Used by oprofile and other similar tools. */
92795- seq_printf(m, " 0x%pK", mod->module_core);
92796+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
92797
92798 /* Taints info */
92799 if (mod->taints)
92800@@ -3763,7 +3936,17 @@ static const struct file_operations proc_modules_operations = {
92801
92802 static int __init proc_modules_init(void)
92803 {
92804+#ifndef CONFIG_GRKERNSEC_HIDESYM
92805+#ifdef CONFIG_GRKERNSEC_PROC_USER
92806+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92807+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92808+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92809+#else
92810 proc_create("modules", 0, NULL, &proc_modules_operations);
92811+#endif
92812+#else
92813+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92814+#endif
92815 return 0;
92816 }
92817 module_init(proc_modules_init);
92818@@ -3824,7 +4007,8 @@ struct module *__module_address(unsigned long addr)
92819 {
92820 struct module *mod;
92821
92822- if (addr < module_addr_min || addr > module_addr_max)
92823+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92824+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92825 return NULL;
92826
92827 list_for_each_entry_rcu(mod, &modules, list) {
92828@@ -3865,11 +4049,20 @@ bool is_module_text_address(unsigned long addr)
92829 */
92830 struct module *__module_text_address(unsigned long addr)
92831 {
92832- struct module *mod = __module_address(addr);
92833+ struct module *mod;
92834+
92835+#ifdef CONFIG_X86_32
92836+ addr = ktla_ktva(addr);
92837+#endif
92838+
92839+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92840+ return NULL;
92841+
92842+ mod = __module_address(addr);
92843+
92844 if (mod) {
92845 /* Make sure it's within the text section. */
92846- if (!within(addr, mod->module_init, mod->init_text_size)
92847- && !within(addr, mod->module_core, mod->core_text_size))
92848+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92849 mod = NULL;
92850 }
92851 return mod;
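[editor's note] The hunk above ends the GRKERNSEC_MODHARDEN handling: auto-load requests are denied and the requesting uid is recovered from a marker that the request path embedded in mod->args, by temporarily NUL-terminating the string at the trailing underscore. A minimal userspace sketch of that in-place parse; the marker layout (uid digits directly after the tag, closed by '_') and the sample args string are assumptions for illustration, not taken from this hunk:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Illustrative stand-in for mod->args. */
	char args[] = "foo=1 grsec_modharden_normal1000_ bar=2";
	char *p, *p2;

	p = strstr(args, "grsec_modharden_normal");
	if (p) {
		p += sizeof("grsec_modharden_normal") - 1; /* skip the tag */
		p2 = strstr(p, "_");
		if (p2) {
			*p2 = '\0';	/* cut the string at the trailing '_' */
			printf("denied module auto-load by uid %.9s\n", p);
			*p2 = '_';	/* restore args afterwards */
		}
	}
	return 0;
}

Restoring the '_' after printing keeps mod->args intact for whatever else still reads it.
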
92852diff --git a/kernel/notifier.c b/kernel/notifier.c
92853index ae9fc7c..5085fbf 100644
92854--- a/kernel/notifier.c
92855+++ b/kernel/notifier.c
92856@@ -5,6 +5,7 @@
92857 #include <linux/rcupdate.h>
92858 #include <linux/vmalloc.h>
92859 #include <linux/reboot.h>
92860+#include <linux/mm.h>
92861
92862 /*
92863 * Notifier list for kernel code which wants to be called
92864@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
92865 while ((*nl) != NULL) {
92866 if (n->priority > (*nl)->priority)
92867 break;
92868- nl = &((*nl)->next);
92869+ nl = (struct notifier_block **)&((*nl)->next);
92870 }
92871- n->next = *nl;
92872+ pax_open_kernel();
92873+ *(const void **)&n->next = *nl;
92874 rcu_assign_pointer(*nl, n);
92875+ pax_close_kernel();
92876 return 0;
92877 }
92878
92879@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
92880 return 0;
92881 if (n->priority > (*nl)->priority)
92882 break;
92883- nl = &((*nl)->next);
92884+ nl = (struct notifier_block **)&((*nl)->next);
92885 }
92886- n->next = *nl;
92887+ pax_open_kernel();
92888+ *(const void **)&n->next = *nl;
92889 rcu_assign_pointer(*nl, n);
92890+ pax_close_kernel();
92891 return 0;
92892 }
92893
92894@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
92895 {
92896 while ((*nl) != NULL) {
92897 if ((*nl) == n) {
92898+ pax_open_kernel();
92899 rcu_assign_pointer(*nl, n->next);
92900+ pax_close_kernel();
92901 return 0;
92902 }
92903- nl = &((*nl)->next);
92904+ nl = (struct notifier_block **)&((*nl)->next);
92905 }
92906 return -ENOENT;
92907 }
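[editor's note] The notifier hunks wrap every chain update in pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection so the chains themselves can live in read-only memory; the casts through const void ** exist only to store into those constified next pointers. A userspace analogue of the same open-write-window pattern using mprotect(); the kernel primitives work differently (per-CPU CR0/PTE manipulation), so this is a sketch of the idea, not the mechanism:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct node { struct node *next; int priority; };

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* A page that normally stays read-only, like the constified
	   notifier chains in the patch. */
	struct node *head = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (head == MAP_FAILED)
		return 1;
	head->next = NULL;
	head->priority = 0;
	mprotect(head, pagesz, PROT_READ);		/* seal it */

	/* "pax_open_kernel()": briefly allow writes for the update. */
	mprotect(head, pagesz, PROT_READ | PROT_WRITE);
	head->priority = 42;
	/* "pax_close_kernel()": seal it again. */
	mprotect(head, pagesz, PROT_READ);

	printf("priority=%d\n", head->priority);
	return 0;
}
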
92908diff --git a/kernel/padata.c b/kernel/padata.c
92909index b38bea9..91acfbe 100644
92910--- a/kernel/padata.c
92911+++ b/kernel/padata.c
92912@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
92913 * seq_nr mod. number of cpus in use.
92914 */
92915
92916- seq_nr = atomic_inc_return(&pd->seq_nr);
92917+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
92918 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
92919
92920 return padata_index_to_cpu(pd, cpu_index);
92921@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
92922 padata_init_pqueues(pd);
92923 padata_init_squeues(pd);
92924 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
92925- atomic_set(&pd->seq_nr, -1);
92926+ atomic_set_unchecked(&pd->seq_nr, -1);
92927 atomic_set(&pd->reorder_objects, 0);
92928 atomic_set(&pd->refcnt, 0);
92929 pd->pinst = pinst;
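[editor's note] padata's seq_nr is a round-robin ticket: only its value modulo the CPU count matters, so wraparound is harmless and the counter moves to the _unchecked atomic flavor, which PaX's REFCOUNT hardening exempts from overflow detection. A standalone C11 sketch of such a wrap-tolerant ticket counter; atomic_inc_return() in the hunk yields the new value, while the relaxed fetch-add here yields the old one, which distributes work just as evenly:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq_nr;

static unsigned int pick_cpu(unsigned int nr_cpus)
{
	/* Wrap is benign: nothing is freed or sized off this value. */
	unsigned int seq = atomic_fetch_add_explicit(&seq_nr, 1,
						     memory_order_relaxed);
	return seq % nr_cpus;
}

int main(void)
{
	atomic_init(&seq_nr, (unsigned int)-1);	/* mirrors atomic_set(..., -1) */
	for (int i = 0; i < 5; i++)
		printf("cpu %u\n", pick_cpu(4));
	return 0;
}
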
92930diff --git a/kernel/panic.c b/kernel/panic.c
92931index 8136ad7..15c857b 100644
92932--- a/kernel/panic.c
92933+++ b/kernel/panic.c
92934@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
92935 /*
92936 * Stop ourself in panic -- architecture code may override this
92937 */
92938-void __weak panic_smp_self_stop(void)
92939+void __weak __noreturn panic_smp_self_stop(void)
92940 {
92941 while (1)
92942 cpu_relax();
92943@@ -425,7 +425,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
92944 disable_trace_on_warning();
92945
92946 pr_warn("------------[ cut here ]------------\n");
92947- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
92948+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
92949 raw_smp_processor_id(), current->pid, file, line, caller);
92950
92951 if (args)
92952@@ -490,7 +490,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92953 */
92954 __visible void __stack_chk_fail(void)
92955 {
92956- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92957+ dump_stack();
92958+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92959 __builtin_return_address(0));
92960 }
92961 EXPORT_SYMBOL(__stack_chk_fail);
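[editor's note] Two small hardening tweaks here: panic_smp_self_stop() gains __noreturn, matching its infinite loop, and %pS/%p become %pA, grsecurity's symbol format, which (per the format handling added elsewhere in this patch) avoids printing raw kernel addresses when symbol hiding is on; the stack-protector path also gains a dump_stack() for context. A sketch of what the noreturn attribute buys the compiler; the helper names are illustrative:

#include <stdio.h>

/* Marking the stop routine noreturn documents the contract and lets
   the compiler treat everything after a call to it as unreachable. */
static void __attribute__((noreturn)) self_stop(void)
{
	for (;;)
		;	/* cpu_relax() in the kernel version */
}

static int fail(int err)
{
	if (err)
		self_stop();	/* no fallthrough path needed here */
	return 0;
}

int main(void)
{
	printf("%d\n", fail(0));
	return 0;
}
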
92962diff --git a/kernel/pid.c b/kernel/pid.c
92963index cd36a5e..11f185d 100644
92964--- a/kernel/pid.c
92965+++ b/kernel/pid.c
92966@@ -33,6 +33,7 @@
92967 #include <linux/rculist.h>
92968 #include <linux/bootmem.h>
92969 #include <linux/hash.h>
92970+#include <linux/security.h>
92971 #include <linux/pid_namespace.h>
92972 #include <linux/init_task.h>
92973 #include <linux/syscalls.h>
92974@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
92975
92976 int pid_max = PID_MAX_DEFAULT;
92977
92978-#define RESERVED_PIDS 300
92979+#define RESERVED_PIDS 500
92980
92981 int pid_max_min = RESERVED_PIDS + 1;
92982 int pid_max_max = PID_MAX_LIMIT;
92983@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
92984 */
92985 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
92986 {
92987+ struct task_struct *task;
92988+
92989 rcu_lockdep_assert(rcu_read_lock_held(),
92990 "find_task_by_pid_ns() needs rcu_read_lock()"
92991 " protection");
92992- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92993+
92994+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92995+
92996+ if (gr_pid_is_chrooted(task))
92997+ return NULL;
92998+
92999+ return task;
93000 }
93001
93002 struct task_struct *find_task_by_vpid(pid_t vnr)
93003@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
93004 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
93005 }
93006
93007+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
93008+{
93009+ rcu_lockdep_assert(rcu_read_lock_held(),
93010+ "find_task_by_pid_ns() needs rcu_read_lock()"
93011+ " protection");
93012+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
93013+}
93014+
93015 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
93016 {
93017 struct pid *pid;
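[editor's note] Beyond raising RESERVED_PIDS from 300 to 500, the pid.c change makes find_task_by_pid_ns() refuse to resolve tasks outside the caller's chroot (gr_pid_is_chrooted()), while the new find_task_by_vpid_unrestricted() keeps an unfiltered path for callers that legitimately need one. A toy sketch of that filter-after-lookup shape; the task table and predicate are stand-ins, not kernel structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool chrooted_away; };

/* Stand-in for gr_pid_is_chrooted(): should this task be hidden
   from the caller's chroot? */
static bool pid_is_chrooted(const struct task *t)
{
	return t && t->chrooted_away;
}

static struct task *raw_lookup(struct task *table, size_t n, int pid)
{
	for (size_t i = 0; i < n; i++)
		if (table[i].pid == pid)
			return &table[i];
	return NULL;
}

/* Filtered lookup: resolve first, then refuse to reveal. */
static struct task *lookup(struct task *table, size_t n, int pid)
{
	struct task *t = raw_lookup(table, n, pid);

	if (pid_is_chrooted(t))
		return NULL;
	return t;
}

int main(void)
{
	struct task tasks[] = { { 100, false }, { 200, true } };

	printf("%p %p\n", (void *)lookup(tasks, 2, 100),
	       (void *)lookup(tasks, 2, 200));	/* second is hidden */
	return 0;
}
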
93018diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
93019index a65ba13..f600dbb 100644
93020--- a/kernel/pid_namespace.c
93021+++ b/kernel/pid_namespace.c
93022@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
93023 void __user *buffer, size_t *lenp, loff_t *ppos)
93024 {
93025 struct pid_namespace *pid_ns = task_active_pid_ns(current);
93026- struct ctl_table tmp = *table;
93027+ ctl_table_no_const tmp = *table;
93028
93029 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
93030 return -EPERM;
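[editor's note] ctl_table_no_const is the pattern grsecurity uses wherever the global sysctl table has been constified: the handler takes a mutable stack copy, adjusts the per-call fields, and passes that on. A condensed sketch of the copy-then-customize step; the struct layout is trimmed to essentials and purely illustrative:

#include <stdio.h>

struct ctl_table { const char *name; void *data; int maxlen; };

/* The global table stays const; per-call tweaks go through a local
   writable copy (what ctl_table_no_const provides in the patch). */
static const struct ctl_table pid_max_table = { "pid_max", NULL, 4 };

static int handler(void *per_ns_data)
{
	struct ctl_table tmp = pid_max_table;	/* mutable stack copy */

	tmp.data = per_ns_data;		/* point at this namespace's value */
	printf("%s maxlen=%d data=%p\n", tmp.name, tmp.maxlen, tmp.data);
	return 0;
}

int main(void)
{
	int pid_max = 32768;
	return handler(&pid_max);
}
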
93031diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
93032index 7e01f78..f5da19d 100644
93033--- a/kernel/power/Kconfig
93034+++ b/kernel/power/Kconfig
93035@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
93036 config HIBERNATION
93037 bool "Hibernation (aka 'suspend to disk')"
93038 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
93039+ depends on !GRKERNSEC_KMEM
93040+ depends on !PAX_MEMORY_SANITIZE
93041 select HIBERNATE_CALLBACKS
93042 select LZO_COMPRESS
93043 select LZO_DECOMPRESS
93044diff --git a/kernel/power/process.c b/kernel/power/process.c
93045index 564f786..361a18e 100644
93046--- a/kernel/power/process.c
93047+++ b/kernel/power/process.c
93048@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
93049 unsigned int elapsed_msecs;
93050 bool wakeup = false;
93051 int sleep_usecs = USEC_PER_MSEC;
93052+ bool timedout = false;
93053
93054 do_gettimeofday(&start);
93055
93056@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
93057
93058 while (true) {
93059 todo = 0;
93060+ if (time_after(jiffies, end_time))
93061+ timedout = true;
93062 read_lock(&tasklist_lock);
93063 for_each_process_thread(g, p) {
93064 if (p == current || !freeze_task(p))
93065 continue;
93066
93067- if (!freezer_should_skip(p))
93068+ if (!freezer_should_skip(p)) {
93069 todo++;
93070+ if (timedout) {
93071+ printk(KERN_ERR "Task refusing to freeze:\n");
93072+ sched_show_task(p);
93073+ }
93074+ }
93075 }
93076 read_unlock(&tasklist_lock);
93077
93078@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
93079 todo += wq_busy;
93080 }
93081
93082- if (!todo || time_after(jiffies, end_time))
93083+ if (!todo || timedout)
93084 break;
93085
93086 if (pm_wakeup_pending()) {
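[editor's note] The freezer change latches the deadline into a timedout flag at the top of each scan, so on the pass where time has run out every still-unfrozen task is reported via sched_show_task() before the loop gives up; previously the loop exited on timeout without naming the culprits. A small sketch of the same latch-then-report loop shape, with an arbitrary one-second deadline:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll until work drains or the deadline passes; the flag is latched
   once per pass so the report names exactly the items seen on the
   final pass. */
static bool drain(int *todo_items, int n, time_t end_time)
{
	bool timedout = false;

	while (1) {
		int todo = 0;

		if (time(NULL) > end_time)
			timedout = true;
		for (int i = 0; i < n; i++) {
			if (!todo_items[i])
				continue;
			todo_items[i]--;	/* make some progress */
			if (todo_items[i]) {
				todo++;
				if (timedout)
					fprintf(stderr,
						"item %d refusing to drain\n", i);
			}
		}
		if (!todo || timedout)
			return !todo;
	}
}

int main(void)
{
	int items[] = { 2, 3 };

	printf("drained: %d\n", drain(items, 2, time(NULL) + 1));
	return 0;
}
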
93087diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
93088index bb0635b..9aff9f3 100644
93089--- a/kernel/printk/printk.c
93090+++ b/kernel/printk/printk.c
93091@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
93092 if (from_file && type != SYSLOG_ACTION_OPEN)
93093 return 0;
93094
93095+#ifdef CONFIG_GRKERNSEC_DMESG
93096+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
93097+ return -EPERM;
93098+#endif
93099+
93100 if (syslog_action_restricted(type)) {
93101 if (capable(CAP_SYSLOG))
93102 return 0;
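[editor's note] The added GRKERNSEC_DMESG gate runs before the stock syslog restrictions: with the grsec_enable_dmesg sysctl on, a caller needs CAP_SYSLOG, or CAP_SYS_ADMIN accepted via capable_nolog() so the fallback check does not spam the audit log, to read the ring buffer at all. A stub-based sketch of that short-circuit; the capability checks are hard-coded stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel capability checks; capable_nolog() is
   grsecurity's variant that skips audit logging on failure. */
static bool capable_syslog(void) { return false; }
static bool capable_sys_admin_nolog(void) { return false; }

static bool dmesg_restricted = true;	/* grsec_enable_dmesg stand-in */

/* Mirrors the added gate: deny unprivileged dmesg access up front,
   before any of the stock syslog permission logic runs. */
static int check_syslog_permissions(void)
{
	if (dmesg_restricted && !capable_syslog() &&
	    !capable_sys_admin_nolog())
		return -1;	/* -EPERM in the kernel */
	return 0;
}

int main(void)
{
	printf("%d\n", check_syslog_permissions());
	return 0;
}
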
93103diff --git a/kernel/profile.c b/kernel/profile.c
93104index a7bcd28..5b368fa 100644
93105--- a/kernel/profile.c
93106+++ b/kernel/profile.c
93107@@ -37,7 +37,7 @@ struct profile_hit {
93108 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
93109 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
93110
93111-static atomic_t *prof_buffer;
93112+static atomic_unchecked_t *prof_buffer;
93113 static unsigned long prof_len, prof_shift;
93114
93115 int prof_on __read_mostly;
93116@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
93117 hits[i].pc = 0;
93118 continue;
93119 }
93120- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93121+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93122 hits[i].hits = hits[i].pc = 0;
93123 }
93124 }
93125@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93126 * Add the current hit(s) and flush the write-queue out
93127 * to the global buffer:
93128 */
93129- atomic_add(nr_hits, &prof_buffer[pc]);
93130+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93131 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93132- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93133+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93134 hits[i].pc = hits[i].hits = 0;
93135 }
93136 out:
93137@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
93138 {
93139 unsigned long pc;
93140 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93141- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93142+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93143 }
93144 #endif /* !CONFIG_SMP */
93145
93146@@ -489,7 +489,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93147 return -EFAULT;
93148 buf++; p++; count--; read++;
93149 }
93150- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93151+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93152 if (copy_to_user(buf, (void *)pnt, count))
93153 return -EFAULT;
93154 read += count;
93155@@ -520,7 +520,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93156 }
93157 #endif
93158 profile_discard_flip_buffers();
93159- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93160+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93161 return count;
93162 }
93163
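[editor's note] The profiler's hit counters are another pure-statistics case: increments only need to be atomic, wraparound is meaningless noise, so every atomic_t becomes atomic_unchecked_t, and, importantly, the sizeof() in read_profile()'s pointer arithmetic and the memset() switch type with it so buffer indexing stays consistent. A C11 sketch of such a relaxed, wrap-tolerant hit buffer:

#include <stdatomic.h>
#include <stdio.h>

#define PROF_LEN 8

/* Hit counters: statistics only, so relaxed ordering and silent
   wraparound are both acceptable. */
static atomic_uint prof_buffer[PROF_LEN];

static void profile_hit(unsigned long pc, unsigned int nr_hits)
{
	atomic_fetch_add_explicit(&prof_buffer[pc % PROF_LEN], nr_hits,
				  memory_order_relaxed);
}

int main(void)
{
	profile_hit(3, 2);
	profile_hit(11, 1);	/* aliases slot 3 */
	printf("slot 3: %u\n",
	       atomic_load_explicit(&prof_buffer[3], memory_order_relaxed));
	return 0;
}
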
93164diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93165index 9a34bd8..38d90e5 100644
93166--- a/kernel/ptrace.c
93167+++ b/kernel/ptrace.c
93168@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
93169 if (seize)
93170 flags |= PT_SEIZED;
93171 rcu_read_lock();
93172- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93173+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
93174 flags |= PT_PTRACE_CAP;
93175 rcu_read_unlock();
93176 task->ptrace = flags;
93177@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93178 break;
93179 return -EIO;
93180 }
93181- if (copy_to_user(dst, buf, retval))
93182+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
93183 return -EFAULT;
93184 copied += retval;
93185 src += retval;
93186@@ -803,7 +803,7 @@ int ptrace_request(struct task_struct *child, long request,
93187 bool seized = child->ptrace & PT_SEIZED;
93188 int ret = -EIO;
93189 siginfo_t siginfo, *si;
93190- void __user *datavp = (void __user *) data;
93191+ void __user *datavp = (__force void __user *) data;
93192 unsigned long __user *datalp = datavp;
93193 unsigned long flags;
93194
93195@@ -1049,14 +1049,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
93196 goto out;
93197 }
93198
93199+ if (gr_handle_ptrace(child, request)) {
93200+ ret = -EPERM;
93201+ goto out_put_task_struct;
93202+ }
93203+
93204 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93205 ret = ptrace_attach(child, request, addr, data);
93206 /*
93207 * Some architectures need to do book-keeping after
93208 * a ptrace attach.
93209 */
93210- if (!ret)
93211+ if (!ret) {
93212 arch_ptrace_attach(child);
93213+ gr_audit_ptrace(child);
93214+ }
93215 goto out_put_task_struct;
93216 }
93217
93218@@ -1084,7 +1091,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
93219 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93220 if (copied != sizeof(tmp))
93221 return -EIO;
93222- return put_user(tmp, (unsigned long __user *)data);
93223+ return put_user(tmp, (__force unsigned long __user *)data);
93224 }
93225
93226 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
93227@@ -1177,7 +1184,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93228 }
93229
93230 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93231- compat_long_t, addr, compat_long_t, data)
93232+ compat_ulong_t, addr, compat_ulong_t, data)
93233 {
93234 struct task_struct *child;
93235 long ret;
93236@@ -1193,14 +1200,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
93237 goto out;
93238 }
93239
93240+ if (gr_handle_ptrace(child, request)) {
93241+ ret = -EPERM;
93242+ goto out_put_task_struct;
93243+ }
93244+
93245 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
93246 ret = ptrace_attach(child, request, addr, data);
93247 /*
93248 * Some architectures need to do book-keeping after
93249 * a ptrace attach.
93250 */
93251- if (!ret)
93252+ if (!ret) {
93253 arch_ptrace_attach(child);
93254+ gr_audit_ptrace(child);
93255+ }
93256 goto out_put_task_struct;
93257 }
93258
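[editor's note] Two defensive changes stand out in ptrace: ptrace_readdata() now rejects a retval larger than its stack buffer before copy_to_user(), and the compat entry point takes compat_ulong_t instead of compat_long_t so a 32-bit addr or data word is zero-extended rather than sign-extended on the way to the 64-bit handlers. The sign-extension pitfall in isolation, with a hypothetical value but real arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t user_addr = 0x80000000u;	/* high 32-bit address */

	/* Widening through a signed 32-bit type sign-extends... */
	int64_t as_signed = (int32_t)user_addr;
	/* ...while an unsigned type zero-extends, preserving the value. */
	uint64_t as_unsigned = (uint32_t)user_addr;

	printf("signed:   0x%016llx\n", (unsigned long long)as_signed);
	printf("unsigned: 0x%016llx\n", (unsigned long long)as_unsigned);
	return 0;
}

With the signed type, 0x80000000 reaches the handler as 0xffffffff80000000, an entirely different address; the unsigned prototype avoids that.
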
93259diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
93260index 30d42aa..cac5d66 100644
93261--- a/kernel/rcu/rcutorture.c
93262+++ b/kernel/rcu/rcutorture.c
93263@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93264 rcu_torture_count) = { 0 };
93265 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
93266 rcu_torture_batch) = { 0 };
93267-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93268-static atomic_t n_rcu_torture_alloc;
93269-static atomic_t n_rcu_torture_alloc_fail;
93270-static atomic_t n_rcu_torture_free;
93271-static atomic_t n_rcu_torture_mberror;
93272-static atomic_t n_rcu_torture_error;
93273+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93274+static atomic_unchecked_t n_rcu_torture_alloc;
93275+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93276+static atomic_unchecked_t n_rcu_torture_free;
93277+static atomic_unchecked_t n_rcu_torture_mberror;
93278+static atomic_unchecked_t n_rcu_torture_error;
93279 static long n_rcu_torture_barrier_error;
93280 static long n_rcu_torture_boost_ktrerror;
93281 static long n_rcu_torture_boost_rterror;
93282@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
93283 static long n_rcu_torture_timers;
93284 static long n_barrier_attempts;
93285 static long n_barrier_successes;
93286-static atomic_long_t n_cbfloods;
93287+static atomic_long_unchecked_t n_cbfloods;
93288 static struct list_head rcu_torture_removed;
93289
93290 static int rcu_torture_writer_state;
93291@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
93292
93293 spin_lock_bh(&rcu_torture_lock);
93294 if (list_empty(&rcu_torture_freelist)) {
93295- atomic_inc(&n_rcu_torture_alloc_fail);
93296+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93297 spin_unlock_bh(&rcu_torture_lock);
93298 return NULL;
93299 }
93300- atomic_inc(&n_rcu_torture_alloc);
93301+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93302 p = rcu_torture_freelist.next;
93303 list_del_init(p);
93304 spin_unlock_bh(&rcu_torture_lock);
93305@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
93306 static void
93307 rcu_torture_free(struct rcu_torture *p)
93308 {
93309- atomic_inc(&n_rcu_torture_free);
93310+ atomic_inc_unchecked(&n_rcu_torture_free);
93311 spin_lock_bh(&rcu_torture_lock);
93312 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93313 spin_unlock_bh(&rcu_torture_lock);
93314@@ -308,7 +308,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
93315 i = rp->rtort_pipe_count;
93316 if (i > RCU_TORTURE_PIPE_LEN)
93317 i = RCU_TORTURE_PIPE_LEN;
93318- atomic_inc(&rcu_torture_wcount[i]);
93319+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93320 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93321 rp->rtort_mbtest = 0;
93322 return true;
93323@@ -796,7 +796,7 @@ rcu_torture_cbflood(void *arg)
93324 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
93325 do {
93326 schedule_timeout_interruptible(cbflood_inter_holdoff);
93327- atomic_long_inc(&n_cbfloods);
93328+ atomic_long_inc_unchecked(&n_cbfloods);
93329 WARN_ON(signal_pending(current));
93330 for (i = 0; i < cbflood_n_burst; i++) {
93331 for (j = 0; j < cbflood_n_per_burst; j++) {
93332@@ -915,7 +915,7 @@ rcu_torture_writer(void *arg)
93333 i = old_rp->rtort_pipe_count;
93334 if (i > RCU_TORTURE_PIPE_LEN)
93335 i = RCU_TORTURE_PIPE_LEN;
93336- atomic_inc(&rcu_torture_wcount[i]);
93337+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93338 old_rp->rtort_pipe_count++;
93339 switch (synctype[torture_random(&rand) % nsynctypes]) {
93340 case RTWS_DEF_FREE:
93341@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
93342 return;
93343 }
93344 if (p->rtort_mbtest == 0)
93345- atomic_inc(&n_rcu_torture_mberror);
93346+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93347 spin_lock(&rand_lock);
93348 cur_ops->read_delay(&rand);
93349 n_rcu_torture_timers++;
93350@@ -1111,7 +1111,7 @@ rcu_torture_reader(void *arg)
93351 continue;
93352 }
93353 if (p->rtort_mbtest == 0)
93354- atomic_inc(&n_rcu_torture_mberror);
93355+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93356 cur_ops->read_delay(&rand);
93357 preempt_disable();
93358 pipe_count = p->rtort_pipe_count;
93359@@ -1180,11 +1180,11 @@ rcu_torture_stats_print(void)
93360 rcu_torture_current,
93361 rcu_torture_current_version,
93362 list_empty(&rcu_torture_freelist),
93363- atomic_read(&n_rcu_torture_alloc),
93364- atomic_read(&n_rcu_torture_alloc_fail),
93365- atomic_read(&n_rcu_torture_free));
93366+ atomic_read_unchecked(&n_rcu_torture_alloc),
93367+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93368+ atomic_read_unchecked(&n_rcu_torture_free));
93369 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
93370- atomic_read(&n_rcu_torture_mberror),
93371+ atomic_read_unchecked(&n_rcu_torture_mberror),
93372 n_rcu_torture_boost_ktrerror,
93373 n_rcu_torture_boost_rterror);
93374 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
93375@@ -1196,17 +1196,17 @@ rcu_torture_stats_print(void)
93376 n_barrier_successes,
93377 n_barrier_attempts,
93378 n_rcu_torture_barrier_error);
93379- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
93380+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
93381
93382 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
93383- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
93384+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
93385 n_rcu_torture_barrier_error != 0 ||
93386 n_rcu_torture_boost_ktrerror != 0 ||
93387 n_rcu_torture_boost_rterror != 0 ||
93388 n_rcu_torture_boost_failure != 0 ||
93389 i > 1) {
93390 pr_cont("%s", "!!! ");
93391- atomic_inc(&n_rcu_torture_error);
93392+ atomic_inc_unchecked(&n_rcu_torture_error);
93393 WARN_ON_ONCE(1);
93394 }
93395 pr_cont("Reader Pipe: ");
93396@@ -1223,7 +1223,7 @@ rcu_torture_stats_print(void)
93397 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
93398 pr_cont("Free-Block Circulation: ");
93399 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93400- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
93401+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
93402 }
93403 pr_cont("\n");
93404
93405@@ -1570,7 +1570,7 @@ rcu_torture_cleanup(void)
93406
93407 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
93408
93409- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93410+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
93411 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
93412 else if (torture_onoff_failures())
93413 rcu_torture_print_module_parms(cur_ops,
93414@@ -1695,18 +1695,18 @@ rcu_torture_init(void)
93415
93416 rcu_torture_current = NULL;
93417 rcu_torture_current_version = 0;
93418- atomic_set(&n_rcu_torture_alloc, 0);
93419- atomic_set(&n_rcu_torture_alloc_fail, 0);
93420- atomic_set(&n_rcu_torture_free, 0);
93421- atomic_set(&n_rcu_torture_mberror, 0);
93422- atomic_set(&n_rcu_torture_error, 0);
93423+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93424+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93425+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93426+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93427+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93428 n_rcu_torture_barrier_error = 0;
93429 n_rcu_torture_boost_ktrerror = 0;
93430 n_rcu_torture_boost_rterror = 0;
93431 n_rcu_torture_boost_failure = 0;
93432 n_rcu_torture_boosts = 0;
93433 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93434- atomic_set(&rcu_torture_wcount[i], 0);
93435+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93436 for_each_possible_cpu(cpu) {
93437 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93438 per_cpu(rcu_torture_count, cpu)[i] = 0;
93439diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
93440index cc9ceca..ce075a6 100644
93441--- a/kernel/rcu/tiny.c
93442+++ b/kernel/rcu/tiny.c
93443@@ -42,7 +42,7 @@
93444 /* Forward declarations for tiny_plugin.h. */
93445 struct rcu_ctrlblk;
93446 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
93447-static void rcu_process_callbacks(struct softirq_action *unused);
93448+static void rcu_process_callbacks(void);
93449 static void __call_rcu(struct rcu_head *head,
93450 void (*func)(struct rcu_head *rcu),
93451 struct rcu_ctrlblk *rcp);
93452@@ -210,7 +210,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
93453 false));
93454 }
93455
93456-static void rcu_process_callbacks(struct softirq_action *unused)
93457+static __latent_entropy void rcu_process_callbacks(void)
93458 {
93459 __rcu_process_callbacks(&rcu_sched_ctrlblk);
93460 __rcu_process_callbacks(&rcu_bh_ctrlblk);
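[editor's note] rcu_process_callbacks() loses its never-used struct softirq_action * parameter and gains __latent_entropy, a PaX plugin attribute that feeds some entropy on invocation. Dropping the dead argument gives the handler the same prototype everywhere, which matters once indirect calls are type-checked; a sketch of dispatch through such a uniform void (*)(void) table:

#include <stdio.h>

/* Softirq-style dispatch with argument-free handlers, so every entry
   shares one exact function-pointer type. */
typedef void (*handler_fn)(void);

static void process_callbacks(void)
{
	printf("processing callbacks\n");
}

static handler_fn handlers[] = { process_callbacks };

int main(void)
{
	for (unsigned i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		handlers[i]();
	return 0;
}
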
93461diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
93462index f94e209..d2985bd 100644
93463--- a/kernel/rcu/tiny_plugin.h
93464+++ b/kernel/rcu/tiny_plugin.h
93465@@ -150,10 +150,10 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
93466 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
93467 jiffies - rcp->gp_start, rcp->qlen);
93468 dump_stack();
93469- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
93470+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
93471 3 * rcu_jiffies_till_stall_check() + 3;
93472 } else if (ULONG_CMP_GE(j, js)) {
93473- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93474+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93475 }
93476 }
93477
93478@@ -161,7 +161,7 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
93479 {
93480 rcp->ticks_this_gp = 0;
93481 rcp->gp_start = jiffies;
93482- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93483+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
93484 }
93485
93486 static void check_cpu_stalls(void)
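[editor's note] All the ACCESS_ONCE()-to-ACCESS_ONCE_RW() churn across these RCU files follows one rule: grsecurity redefines ACCESS_ONCE() as a const volatile access, so a store through it no longer compiles, and ACCESS_ONCE_RW() keeps the writable form for sites that genuinely assign. A compile-time sketch of the split; the definitions are reconstructed from their use here, so consult the patch's compiler.h hunk for the exact forms:

#include <stdio.h>

/* Read side: volatile (no tearing/eliding) and const (no writes). */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
/* Write side: the original writable volatile access. */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long jiffies_stall;

int main(void)
{
	ACCESS_ONCE_RW(jiffies_stall) = 100;		/* deliberate write */
	printf("%lu\n", ACCESS_ONCE(jiffies_stall));	/* reads still work */
	/* ACCESS_ONCE(jiffies_stall) = 200;  <- would fail to compile */
	return 0;
}

The payoff is that new writes to shared state must be annotated deliberately rather than slipping in through what looks like a read macro.
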
93487diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
93488index 48d640c..9401d30 100644
93489--- a/kernel/rcu/tree.c
93490+++ b/kernel/rcu/tree.c
93491@@ -268,7 +268,7 @@ static void rcu_momentary_dyntick_idle(void)
93492 */
93493 rdtp = this_cpu_ptr(&rcu_dynticks);
93494 smp_mb__before_atomic(); /* Earlier stuff before QS. */
93495- atomic_add(2, &rdtp->dynticks); /* QS. */
93496+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
93497 smp_mb__after_atomic(); /* Later stuff after QS. */
93498 break;
93499 }
93500@@ -580,9 +580,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
93501 rcu_prepare_for_idle();
93502 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93503 smp_mb__before_atomic(); /* See above. */
93504- atomic_inc(&rdtp->dynticks);
93505+ atomic_inc_unchecked(&rdtp->dynticks);
93506 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
93507- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93508+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93509 rcu_dynticks_task_enter();
93510
93511 /*
93512@@ -703,10 +703,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
93513
93514 rcu_dynticks_task_exit();
93515 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
93516- atomic_inc(&rdtp->dynticks);
93517+ atomic_inc_unchecked(&rdtp->dynticks);
93518 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
93519 smp_mb__after_atomic(); /* See above. */
93520- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93521+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93522 rcu_cleanup_after_idle();
93523 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
93524 if (!user && !is_idle_task(current)) {
93525@@ -840,12 +840,12 @@ void rcu_nmi_enter(void)
93526 * to be in the outermost NMI handler that interrupted an RCU-idle
93527 * period (observation due to Andy Lutomirski).
93528 */
93529- if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
93530+ if (!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)) {
93531 smp_mb__before_atomic(); /* Force delay from prior write. */
93532- atomic_inc(&rdtp->dynticks);
93533+ atomic_inc_unchecked(&rdtp->dynticks);
93534 /* atomic_inc() before later RCU read-side crit sects */
93535 smp_mb__after_atomic(); /* See above. */
93536- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93537+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93538 incby = 1;
93539 }
93540 rdtp->dynticks_nmi_nesting += incby;
93541@@ -870,7 +870,7 @@ void rcu_nmi_exit(void)
93542 * to us!)
93543 */
93544 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
93545- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
93546+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
93547
93548 /*
93549 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
93550@@ -885,9 +885,9 @@ void rcu_nmi_exit(void)
93551 rdtp->dynticks_nmi_nesting = 0;
93552 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
93553 smp_mb__before_atomic(); /* See above. */
93554- atomic_inc(&rdtp->dynticks);
93555+ atomic_inc_unchecked(&rdtp->dynticks);
93556 smp_mb__after_atomic(); /* Force delay to next write. */
93557- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
93558+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
93559 }
93560
93561 /**
93562@@ -900,7 +900,7 @@ void rcu_nmi_exit(void)
93563 */
93564 bool notrace __rcu_is_watching(void)
93565 {
93566- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
93567+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
93568 }
93569
93570 /**
93571@@ -983,7 +983,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
93572 static int dyntick_save_progress_counter(struct rcu_data *rdp,
93573 bool *isidle, unsigned long *maxj)
93574 {
93575- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
93576+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
93577 rcu_sysidle_check_cpu(rdp, isidle, maxj);
93578 if ((rdp->dynticks_snap & 0x1) == 0) {
93579 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
93580@@ -991,7 +991,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
93581 } else {
93582 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
93583 rdp->mynode->gpnum))
93584- ACCESS_ONCE(rdp->gpwrap) = true;
93585+ ACCESS_ONCE_RW(rdp->gpwrap) = true;
93586 return 0;
93587 }
93588 }
93589@@ -1009,7 +1009,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
93590 int *rcrmp;
93591 unsigned int snap;
93592
93593- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
93594+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
93595 snap = (unsigned int)rdp->dynticks_snap;
93596
93597 /*
93598@@ -1072,10 +1072,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
93599 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
93600 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
93601 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
93602- ACCESS_ONCE(rdp->cond_resched_completed) =
93603+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
93604 ACCESS_ONCE(rdp->mynode->completed);
93605 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
93606- ACCESS_ONCE(*rcrmp) =
93607+ ACCESS_ONCE_RW(*rcrmp) =
93608 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
93609 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
93610 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
93611@@ -1097,7 +1097,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
93612 rsp->gp_start = j;
93613 smp_wmb(); /* Record start time before stall time. */
93614 j1 = rcu_jiffies_till_stall_check();
93615- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
93616+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
93617 rsp->jiffies_resched = j + j1 / 2;
93618 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
93619 }
93620@@ -1156,7 +1156,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
93621 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93622 return;
93623 }
93624- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
93625+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
93626 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93627
93628 /*
93629@@ -1240,7 +1240,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
93630
93631 raw_spin_lock_irqsave(&rnp->lock, flags);
93632 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
93633- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
93634+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
93635 3 * rcu_jiffies_till_stall_check() + 3;
93636 raw_spin_unlock_irqrestore(&rnp->lock, flags);
93637
93638@@ -1324,7 +1324,7 @@ void rcu_cpu_stall_reset(void)
93639 struct rcu_state *rsp;
93640
93641 for_each_rcu_flavor(rsp)
93642- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
93643+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
93644 }
93645
93646 /*
93647@@ -1671,7 +1671,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
93648 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
93649 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
93650 zero_cpu_stall_ticks(rdp);
93651- ACCESS_ONCE(rdp->gpwrap) = false;
93652+ ACCESS_ONCE_RW(rdp->gpwrap) = false;
93653 }
93654 return ret;
93655 }
93656@@ -1706,7 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
93657 struct rcu_data *rdp;
93658 struct rcu_node *rnp = rcu_get_root(rsp);
93659
93660- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93661+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93662 rcu_bind_gp_kthread();
93663 raw_spin_lock_irq(&rnp->lock);
93664 smp_mb__after_unlock_lock();
93665@@ -1715,7 +1715,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
93666 raw_spin_unlock_irq(&rnp->lock);
93667 return 0;
93668 }
93669- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
93670+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
93671
93672 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
93673 /*
93674@@ -1756,9 +1756,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
93675 rdp = this_cpu_ptr(rsp->rda);
93676 rcu_preempt_check_blocked_tasks(rnp);
93677 rnp->qsmask = rnp->qsmaskinit;
93678- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
93679+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
93680 WARN_ON_ONCE(rnp->completed != rsp->completed);
93681- ACCESS_ONCE(rnp->completed) = rsp->completed;
93682+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
93683 if (rnp == rdp->mynode)
93684 (void)__note_gp_changes(rsp, rnp, rdp);
93685 rcu_preempt_boost_start_gp(rnp);
93686@@ -1767,7 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
93687 rnp->grphi, rnp->qsmask);
93688 raw_spin_unlock_irq(&rnp->lock);
93689 cond_resched_rcu_qs();
93690- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93691+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93692 }
93693
93694 mutex_unlock(&rsp->onoff_mutex);
93695@@ -1784,7 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
93696 unsigned long maxj;
93697 struct rcu_node *rnp = rcu_get_root(rsp);
93698
93699- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93700+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93701 rsp->n_force_qs++;
93702 if (fqs_state == RCU_SAVE_DYNTICK) {
93703 /* Collect dyntick-idle snapshots. */
93704@@ -1805,7 +1805,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
93705 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
93706 raw_spin_lock_irq(&rnp->lock);
93707 smp_mb__after_unlock_lock();
93708- ACCESS_ONCE(rsp->gp_flags) =
93709+ ACCESS_ONCE_RW(rsp->gp_flags) =
93710 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
93711 raw_spin_unlock_irq(&rnp->lock);
93712 }
93713@@ -1823,7 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93714 struct rcu_data *rdp;
93715 struct rcu_node *rnp = rcu_get_root(rsp);
93716
93717- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93718+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93719 raw_spin_lock_irq(&rnp->lock);
93720 smp_mb__after_unlock_lock();
93721 gp_duration = jiffies - rsp->gp_start;
93722@@ -1852,7 +1852,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93723 rcu_for_each_node_breadth_first(rsp, rnp) {
93724 raw_spin_lock_irq(&rnp->lock);
93725 smp_mb__after_unlock_lock();
93726- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
93727+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
93728 rdp = this_cpu_ptr(rsp->rda);
93729 if (rnp == rdp->mynode)
93730 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
93731@@ -1860,7 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93732 nocb += rcu_future_gp_cleanup(rsp, rnp);
93733 raw_spin_unlock_irq(&rnp->lock);
93734 cond_resched_rcu_qs();
93735- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93736+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93737 }
93738 rnp = rcu_get_root(rsp);
93739 raw_spin_lock_irq(&rnp->lock);
93740@@ -1868,14 +1868,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
93741 rcu_nocb_gp_set(rnp, nocb);
93742
93743 /* Declare grace period done. */
93744- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
93745+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
93746 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
93747 rsp->fqs_state = RCU_GP_IDLE;
93748 rdp = this_cpu_ptr(rsp->rda);
93749 /* Advance CBs to reduce false positives below. */
93750 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
93751 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
93752- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93753+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93754 trace_rcu_grace_period(rsp->name,
93755 ACCESS_ONCE(rsp->gpnum),
93756 TPS("newreq"));
93757@@ -1910,7 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
93758 if (rcu_gp_init(rsp))
93759 break;
93760 cond_resched_rcu_qs();
93761- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93762+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93763 WARN_ON(signal_pending(current));
93764 trace_rcu_grace_period(rsp->name,
93765 ACCESS_ONCE(rsp->gpnum),
93766@@ -1954,11 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
93767 ACCESS_ONCE(rsp->gpnum),
93768 TPS("fqsend"));
93769 cond_resched_rcu_qs();
93770- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93771+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93772 } else {
93773 /* Deal with stray signal. */
93774 cond_resched_rcu_qs();
93775- ACCESS_ONCE(rsp->gp_activity) = jiffies;
93776+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
93777 WARN_ON(signal_pending(current));
93778 trace_rcu_grace_period(rsp->name,
93779 ACCESS_ONCE(rsp->gpnum),
93780@@ -2003,7 +2003,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
93781 */
93782 return false;
93783 }
93784- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93785+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
93786 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
93787 TPS("newreq"));
93788
93789@@ -2228,7 +2228,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
93790 rsp->qlen += rdp->qlen;
93791 rdp->n_cbs_orphaned += rdp->qlen;
93792 rdp->qlen_lazy = 0;
93793- ACCESS_ONCE(rdp->qlen) = 0;
93794+ ACCESS_ONCE_RW(rdp->qlen) = 0;
93795 }
93796
93797 /*
93798@@ -2490,7 +2490,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
93799 }
93800 smp_mb(); /* List handling before counting for rcu_barrier(). */
93801 rdp->qlen_lazy -= count_lazy;
93802- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
93803+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
93804 rdp->n_cbs_invoked += count;
93805
93806 /* Reinstate batch limit if we have worked down the excess. */
93807@@ -2647,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
93808 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
93809 return; /* Someone beat us to it. */
93810 }
93811- ACCESS_ONCE(rsp->gp_flags) =
93812+ ACCESS_ONCE_RW(rsp->gp_flags) =
93813 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
93814 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
93815 rcu_gp_kthread_wake(rsp);
93816@@ -2693,7 +2693,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
93817 /*
93818 * Do RCU core processing for the current CPU.
93819 */
93820-static void rcu_process_callbacks(struct softirq_action *unused)
93821+static void rcu_process_callbacks(void)
93822 {
93823 struct rcu_state *rsp;
93824
93825@@ -2805,7 +2805,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
93826 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
93827 if (debug_rcu_head_queue(head)) {
93828 /* Probable double call_rcu(), so leak the callback. */
93829- ACCESS_ONCE(head->func) = rcu_leak_callback;
93830+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
93831 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
93832 return;
93833 }
93834@@ -2833,7 +2833,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
93835 local_irq_restore(flags);
93836 return;
93837 }
93838- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
93839+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
93840 if (lazy)
93841 rdp->qlen_lazy++;
93842 else
93843@@ -3106,11 +3106,11 @@ void synchronize_sched_expedited(void)
93844 * counter wrap on a 32-bit system. Quite a few more CPUs would of
93845 * course be required on a 64-bit system.
93846 */
93847- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
93848+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
93849 (ulong)atomic_long_read(&rsp->expedited_done) +
93850 ULONG_MAX / 8)) {
93851 synchronize_sched();
93852- atomic_long_inc(&rsp->expedited_wrap);
93853+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
93854 return;
93855 }
93856
93857@@ -3118,12 +3118,12 @@ void synchronize_sched_expedited(void)
93858 * Take a ticket. Note that atomic_inc_return() implies a
93859 * full memory barrier.
93860 */
93861- snap = atomic_long_inc_return(&rsp->expedited_start);
93862+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
93863 firstsnap = snap;
93864 if (!try_get_online_cpus()) {
93865 /* CPU hotplug operation in flight, fall back to normal GP. */
93866 wait_rcu_gp(call_rcu_sched);
93867- atomic_long_inc(&rsp->expedited_normal);
93868+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93869 return;
93870 }
93871 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
93872@@ -3136,7 +3136,7 @@ void synchronize_sched_expedited(void)
93873 for_each_cpu(cpu, cm) {
93874 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
93875
93876- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
93877+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
93878 cpumask_clear_cpu(cpu, cm);
93879 }
93880 if (cpumask_weight(cm) == 0)
93881@@ -3151,14 +3151,14 @@ void synchronize_sched_expedited(void)
93882 synchronize_sched_expedited_cpu_stop,
93883 NULL) == -EAGAIN) {
93884 put_online_cpus();
93885- atomic_long_inc(&rsp->expedited_tryfail);
93886+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
93887
93888 /* Check to see if someone else did our work for us. */
93889 s = atomic_long_read(&rsp->expedited_done);
93890 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93891 /* ensure test happens before caller kfree */
93892 smp_mb__before_atomic(); /* ^^^ */
93893- atomic_long_inc(&rsp->expedited_workdone1);
93894+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
93895 free_cpumask_var(cm);
93896 return;
93897 }
93898@@ -3168,7 +3168,7 @@ void synchronize_sched_expedited(void)
93899 udelay(trycount * num_online_cpus());
93900 } else {
93901 wait_rcu_gp(call_rcu_sched);
93902- atomic_long_inc(&rsp->expedited_normal);
93903+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93904 free_cpumask_var(cm);
93905 return;
93906 }
93907@@ -3178,7 +3178,7 @@ void synchronize_sched_expedited(void)
93908 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93909 /* ensure test happens before caller kfree */
93910 smp_mb__before_atomic(); /* ^^^ */
93911- atomic_long_inc(&rsp->expedited_workdone2);
93912+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
93913 free_cpumask_var(cm);
93914 return;
93915 }
93916@@ -3193,14 +3193,14 @@ void synchronize_sched_expedited(void)
93917 if (!try_get_online_cpus()) {
93918 /* CPU hotplug operation in flight, use normal GP. */
93919 wait_rcu_gp(call_rcu_sched);
93920- atomic_long_inc(&rsp->expedited_normal);
93921+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93922 free_cpumask_var(cm);
93923 return;
93924 }
93925- snap = atomic_long_read(&rsp->expedited_start);
93926+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
93927 smp_mb(); /* ensure read is before try_stop_cpus(). */
93928 }
93929- atomic_long_inc(&rsp->expedited_stoppedcpus);
93930+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
93931
93932 all_cpus_idle:
93933 free_cpumask_var(cm);
93934@@ -3212,16 +3212,16 @@ all_cpus_idle:
93935 * than we did already did their update.
93936 */
93937 do {
93938- atomic_long_inc(&rsp->expedited_done_tries);
93939+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
93940 s = atomic_long_read(&rsp->expedited_done);
93941 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
93942 /* ensure test happens before caller kfree */
93943 smp_mb__before_atomic(); /* ^^^ */
93944- atomic_long_inc(&rsp->expedited_done_lost);
93945+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
93946 break;
93947 }
93948 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
93949- atomic_long_inc(&rsp->expedited_done_exit);
93950+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
93951
93952 put_online_cpus();
93953 }
93954@@ -3431,7 +3431,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93955 * ACCESS_ONCE() to prevent the compiler from speculating
93956 * the increment to precede the early-exit check.
93957 */
93958- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93959+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93960 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
93961 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
93962 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
93963@@ -3487,7 +3487,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93964
93965 /* Increment ->n_barrier_done to prevent duplicate work. */
93966 smp_mb(); /* Keep increment after above mechanism. */
93967- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93968+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93969 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
93970 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
93971 smp_mb(); /* Keep increment before caller's subsequent code. */
93972@@ -3532,7 +3532,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
93973 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
93974 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
93975 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
93976- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
93977+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
93978 rdp->cpu = cpu;
93979 rdp->rsp = rsp;
93980 rcu_boot_init_nocb_percpu_data(rdp);
93981@@ -3565,8 +3565,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
93982 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
93983 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
93984 rcu_sysidle_init_percpu_data(rdp->dynticks);
93985- atomic_set(&rdp->dynticks->dynticks,
93986- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
93987+ atomic_set_unchecked(&rdp->dynticks->dynticks,
93988+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
93989 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
93990
93991 /* Add CPU to rcu_node bitmasks. */
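[editor's note] The dynticks counter converted throughout tree.c encodes state in its parity: even means the CPU is idle from RCU's point of view, odd means it is not, every transition is a single increment, and the & 0x1 assertions above check exactly that. Since the counter is designed to wrap, the unchecked atomic flavor fits. A compact model of the parity protocol:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Parity protocol: odd = non-idle, even = idle; each state change is
   one increment, and wraparound preserves parity. */
static atomic_uint dynticks = 1;	/* CPUs start non-idle (odd) */

static void eqs_enter(void) { atomic_fetch_add(&dynticks, 1); }	/* odd -> even */
static void eqs_exit(void)  { atomic_fetch_add(&dynticks, 1); }	/* even -> odd */
static bool is_watching(void) { return atomic_load(&dynticks) & 0x1; }

int main(void)
{
	printf("watching: %d\n", is_watching());	/* 1 */
	eqs_enter();
	printf("watching: %d\n", is_watching());	/* 0 */
	eqs_exit();
	printf("watching: %d\n", is_watching());	/* 1 */
	return 0;
}
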
93992diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
93993index 119de39..f07d31a 100644
93994--- a/kernel/rcu/tree.h
93995+++ b/kernel/rcu/tree.h
93996@@ -86,11 +86,11 @@ struct rcu_dynticks {
93997 long long dynticks_nesting; /* Track irq/process nesting level. */
93998 /* Process level is worth LLONG_MAX/2. */
93999 int dynticks_nmi_nesting; /* Track NMI nesting level. */
94000- atomic_t dynticks; /* Even value for idle, else odd. */
94001+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
94002 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
94003 long long dynticks_idle_nesting;
94004 /* irq/process nesting level from idle. */
94005- atomic_t dynticks_idle; /* Even value for idle, else odd. */
94006+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
94007 /* "Idle" excludes userspace execution. */
94008 unsigned long dynticks_idle_jiffies;
94009 /* End of last non-NMI non-idle period. */
94010@@ -457,17 +457,17 @@ struct rcu_state {
94011 /* _rcu_barrier(). */
94012 /* End of fields guarded by barrier_mutex. */
94013
94014- atomic_long_t expedited_start; /* Starting ticket. */
94015- atomic_long_t expedited_done; /* Done ticket. */
94016- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
94017- atomic_long_t expedited_tryfail; /* # acquisition failures. */
94018- atomic_long_t expedited_workdone1; /* # done by others #1. */
94019- atomic_long_t expedited_workdone2; /* # done by others #2. */
94020- atomic_long_t expedited_normal; /* # fallbacks to normal. */
94021- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
94022- atomic_long_t expedited_done_tries; /* # tries to update _done. */
94023- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
94024- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
94025+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
94026+ atomic_long_t expedited_done; /* Done ticket. */
94027+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
94028+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
94029+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
94030+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
94031+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
94032+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
94033+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
94034+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
94035+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
94036
94037 unsigned long jiffies_force_qs; /* Time at which to invoke */
94038 /* force_quiescent_state(). */
94039diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
94040index 0a571e9..fbfd611 100644
94041--- a/kernel/rcu/tree_plugin.h
94042+++ b/kernel/rcu/tree_plugin.h
94043@@ -619,7 +619,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
94044 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
94045 {
94046 return !rcu_preempted_readers_exp(rnp) &&
94047- ACCESS_ONCE(rnp->expmask) == 0;
94048+ ACCESS_ONCE_RW(rnp->expmask) == 0;
94049 }
94050
94051 /*
94052@@ -780,7 +780,7 @@ void synchronize_rcu_expedited(void)
94053
94054 /* Clean up and exit. */
94055 smp_mb(); /* ensure expedited GP seen before counter increment. */
94056- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
94057+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
94058 sync_rcu_preempt_exp_count + 1;
94059 unlock_mb_ret:
94060 mutex_unlock(&sync_rcu_preempt_exp_mutex);
94061@@ -1290,7 +1290,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
94062 free_cpumask_var(cm);
94063 }
94064
94065-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
94066+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
94067 .store = &rcu_cpu_kthread_task,
94068 .thread_should_run = rcu_cpu_kthread_should_run,
94069 .thread_fn = rcu_cpu_kthread,
94070@@ -1761,7 +1761,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
94071 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
94072 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
94073 cpu, ticks_value, ticks_title,
94074- atomic_read(&rdtp->dynticks) & 0xfff,
94075+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
94076 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
94077 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
94078 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
94079@@ -1906,7 +1906,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
94080 return;
94081 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
94082 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
94083- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
94084+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
94085 wake_up(&rdp_leader->nocb_wq);
94086 }
94087 }
94088@@ -1978,7 +1978,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
94089 atomic_long_add(rhcount, &rdp->nocb_q_count);
94090 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
94091 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
94092- ACCESS_ONCE(*old_rhpp) = rhp;
94093+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
94094 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
94095 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
94096
94097@@ -2167,7 +2167,7 @@ wait_again:
94098 continue; /* No CBs here, try next follower. */
94099
94100 /* Move callbacks to wait-for-GP list, which is empty. */
94101- ACCESS_ONCE(rdp->nocb_head) = NULL;
94102+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
94103 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
94104 gotcbs = true;
94105 }
94106@@ -2288,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
94107 list = ACCESS_ONCE(rdp->nocb_follower_head);
94108 BUG_ON(!list);
94109 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
94110- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
94111+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
94112 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
94113
94114 /* Each pass through the following loop invokes a callback. */
94115@@ -2338,7 +2338,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
94116 if (!rcu_nocb_need_deferred_wakeup(rdp))
94117 return;
94118 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
94119- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
94120+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
94121 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
94122 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
94123 }
94124@@ -2461,7 +2461,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
94125 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
94126 "rcuo%c/%d", rsp->abbr, cpu);
94127 BUG_ON(IS_ERR(t));
94128- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
94129+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
94130 }
94131
94132 /*
94133@@ -2666,11 +2666,11 @@ static void rcu_sysidle_enter(int irq)
94134
94135 /* Record start of fully idle period. */
94136 j = jiffies;
94137- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
94138+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
94139 smp_mb__before_atomic();
94140- atomic_inc(&rdtp->dynticks_idle);
94141+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94142 smp_mb__after_atomic();
94143- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
94144+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
94145 }
94146
94147 /*
94148@@ -2741,9 +2741,9 @@ static void rcu_sysidle_exit(int irq)
94149
94150 /* Record end of idle period. */
94151 smp_mb__before_atomic();
94152- atomic_inc(&rdtp->dynticks_idle);
94153+ atomic_inc_unchecked(&rdtp->dynticks_idle);
94154 smp_mb__after_atomic();
94155- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
94156+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
94157
94158 /*
94159 * If we are the timekeeping CPU, we are permitted to be non-idle
94160@@ -2788,7 +2788,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
94161 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
94162
94163 /* Pick up current idle and NMI-nesting counter and check. */
94164- cur = atomic_read(&rdtp->dynticks_idle);
94165+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
94166 if (cur & 0x1) {
94167 *isidle = false; /* We are not idle! */
94168 return;
94169@@ -2837,7 +2837,7 @@ static void rcu_sysidle(unsigned long j)
94170 case RCU_SYSIDLE_NOT:
94171
94172 /* First time all are idle, so note a short idle period. */
94173- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94174+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
94175 break;
94176
94177 case RCU_SYSIDLE_SHORT:
94178@@ -2875,7 +2875,7 @@ static void rcu_sysidle_cancel(void)
94179 {
94180 smp_mb();
94181 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
94182- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
94183+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
94184 }
94185
94186 /*
94187@@ -2927,7 +2927,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
94188 smp_mb(); /* grace period precedes setting inuse. */
94189
94190 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
94191- ACCESS_ONCE(rshp->inuse) = 0;
94192+ ACCESS_ONCE_RW(rshp->inuse) = 0;
94193 }
94194
94195 /*
94196@@ -3080,7 +3080,7 @@ static void rcu_bind_gp_kthread(void)
94197 static void rcu_dynticks_task_enter(void)
94198 {
94199 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
94200- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
94201+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
94202 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
94203 }
94204
94205@@ -3088,6 +3088,6 @@ static void rcu_dynticks_task_enter(void)
94206 static void rcu_dynticks_task_exit(void)
94207 {
94208 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
94209- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
94210+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
94211 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
94212 }
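
A note on the ACCESS_ONCE -> ACCESS_ONCE_RW conversions running through these RCU hunks: under PaX's constification work the stock ACCESS_ONCE() is redefined to yield a const-qualified lvalue, so accidental writes through it fail to compile, and the sites that genuinely need to store go through the _RW variant. Roughly how the pair looks in these kernels' include/linux/compiler.h (shown for orientation only; not part of this hunk):

	#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* read-only view */
	#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* writable view */
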
94213diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
94214index fbb6240..f6c5097 100644
94215--- a/kernel/rcu/tree_trace.c
94216+++ b/kernel/rcu/tree_trace.c
94217@@ -125,7 +125,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
94218 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
94219 rdp->qs_pending);
94220 seq_printf(m, " dt=%d/%llx/%d df=%lu",
94221- atomic_read(&rdp->dynticks->dynticks),
94222+ atomic_read_unchecked(&rdp->dynticks->dynticks),
94223 rdp->dynticks->dynticks_nesting,
94224 rdp->dynticks->dynticks_nmi_nesting,
94225 rdp->dynticks_fqs);
94226@@ -186,17 +186,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
94227 struct rcu_state *rsp = (struct rcu_state *)m->private;
94228
94229 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
94230- atomic_long_read(&rsp->expedited_start),
94231+ atomic_long_read_unchecked(&rsp->expedited_start),
94232 atomic_long_read(&rsp->expedited_done),
94233- atomic_long_read(&rsp->expedited_wrap),
94234- atomic_long_read(&rsp->expedited_tryfail),
94235- atomic_long_read(&rsp->expedited_workdone1),
94236- atomic_long_read(&rsp->expedited_workdone2),
94237- atomic_long_read(&rsp->expedited_normal),
94238- atomic_long_read(&rsp->expedited_stoppedcpus),
94239- atomic_long_read(&rsp->expedited_done_tries),
94240- atomic_long_read(&rsp->expedited_done_lost),
94241- atomic_long_read(&rsp->expedited_done_exit));
94242+ atomic_long_read_unchecked(&rsp->expedited_wrap),
94243+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
94244+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
94245+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
94246+ atomic_long_read_unchecked(&rsp->expedited_normal),
94247+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
94248+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
94249+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
94250+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
94251 return 0;
94252 }
94253
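
The atomic_long_read_unchecked() conversions above are the flip side of PAX_REFCOUNT: ordinary atomic_t operations gain overflow detection, while counters where wraparound is expected and harmless (pure statistics such as the expedited_* fields) move to the _unchecked types so they never trap. A simplified, arch-neutral sketch of the shape involved (the real definitions live in the per-arch atomic headers):

	typedef struct { int counter; } atomic_unchecked_t;
	typedef struct { long counter; } atomic_long_unchecked_t;

	/* same semantics as the checked ops, minus the overflow instrumentation */
	static inline long atomic_long_read_unchecked(const atomic_long_unchecked_t *v)
	{
		return (*(const volatile long *)&v->counter);
	}
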
94254diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
94255index e0d31a3..f4dafe3 100644
94256--- a/kernel/rcu/update.c
94257+++ b/kernel/rcu/update.c
94258@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
94259 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
94260 */
94261 if (till_stall_check < 3) {
94262- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
94263+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
94264 till_stall_check = 3;
94265 } else if (till_stall_check > 300) {
94266- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
94267+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
94268 till_stall_check = 300;
94269 }
94270 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
94271@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
94272 !ACCESS_ONCE(t->on_rq) ||
94273 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
94274 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
94275- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
94276+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
94277 list_del_init(&t->rcu_tasks_holdout_list);
94278 put_task_struct(t);
94279 return;
94280@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
94281 !is_idle_task(t)) {
94282 get_task_struct(t);
94283 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
94284- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
94285+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
94286 list_add(&t->rcu_tasks_holdout_list,
94287 &rcu_tasks_holdouts);
94288 }
94289@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
94290 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
94291 BUG_ON(IS_ERR(t));
94292 smp_mb(); /* Ensure others see full kthread. */
94293- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
94294+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
94295 mutex_unlock(&rcu_tasks_kthread_mutex);
94296 }
94297
94298diff --git a/kernel/resource.c b/kernel/resource.c
94299index 19f2357..ebe7f35 100644
94300--- a/kernel/resource.c
94301+++ b/kernel/resource.c
94302@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
94303
94304 static int __init ioresources_init(void)
94305 {
94306+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94307+#ifdef CONFIG_GRKERNSEC_PROC_USER
94308+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
94309+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
94310+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
94311+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
94312+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
94313+#endif
94314+#else
94315 proc_create("ioports", 0, NULL, &proc_ioports_operations);
94316 proc_create("iomem", 0, NULL, &proc_iomem_operations);
94317+#endif
94318 return 0;
94319 }
94320 __initcall(ioresources_init);
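
For orientation, the permission outcomes of the ioresources_init() hunk above; proc_create() with a mode of 0 falls back to procfs's 0444 default, which is what the stock branch keeps (a summary, assuming stock procfs defaults):

	/*
	 * CONFIG_GRKERNSEC_PROC_USER:      /proc/ioports and /proc/iomem are 0400 (root only)
	 * CONFIG_GRKERNSEC_PROC_USERGROUP: 0440 (root plus the configured proc group)
	 * neither:                         0444 (world readable, stock behaviour)
	 */
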
94321diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
94322index eae160d..c9aa22e 100644
94323--- a/kernel/sched/auto_group.c
94324+++ b/kernel/sched/auto_group.c
94325@@ -11,7 +11,7 @@
94326
94327 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
94328 static struct autogroup autogroup_default;
94329-static atomic_t autogroup_seq_nr;
94330+static atomic_unchecked_t autogroup_seq_nr;
94331
94332 void __init autogroup_init(struct task_struct *init_task)
94333 {
94334@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
94335
94336 kref_init(&ag->kref);
94337 init_rwsem(&ag->lock);
94338- ag->id = atomic_inc_return(&autogroup_seq_nr);
94339+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
94340 ag->tg = tg;
94341 #ifdef CONFIG_RT_GROUP_SCHED
94342 /*
94343diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
94344index 8d0f35d..c16360d 100644
94345--- a/kernel/sched/completion.c
94346+++ b/kernel/sched/completion.c
94347@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
94348 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94349 * or number of jiffies left till timeout) if completed.
94350 */
94351-long __sched
94352+long __sched __intentional_overflow(-1)
94353 wait_for_completion_interruptible_timeout(struct completion *x,
94354 unsigned long timeout)
94355 {
94356@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
94357 *
94358 * Return: -ERESTARTSYS if interrupted, 0 if completed.
94359 */
94360-int __sched wait_for_completion_killable(struct completion *x)
94361+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
94362 {
94363 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
94364 if (t == -ERESTARTSYS)
94365@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
94366 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
94367 * or number of jiffies left till timeout) if completed.
94368 */
94369-long __sched
94370+long __sched __intentional_overflow(-1)
94371 wait_for_completion_killable_timeout(struct completion *x,
94372 unsigned long timeout)
94373 {
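
The __intentional_overflow(-1) annotations on the wait_for_completion_*() variants above are aimed at the size_overflow gcc plugin: these helpers deliberately funnel -ERESTARTSYS through arithmetic the plugin would otherwise flag. A sketch of how the annotation expands in these kernels (the guard define comes from the plugin's command line; treat the exact spelling as an assumption):

	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...)	__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)	/* compiled out without the plugin */
	#endif
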
94374diff --git a/kernel/sched/core.c b/kernel/sched/core.c
94375index 3d5f6f6..a94298f 100644
94376--- a/kernel/sched/core.c
94377+++ b/kernel/sched/core.c
94378@@ -1862,7 +1862,7 @@ void set_numabalancing_state(bool enabled)
94379 int sysctl_numa_balancing(struct ctl_table *table, int write,
94380 void __user *buffer, size_t *lenp, loff_t *ppos)
94381 {
94382- struct ctl_table t;
94383+ ctl_table_no_const t;
94384 int err;
94385 int state = numabalancing_enabled;
94386
94387@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
94388 next->active_mm = oldmm;
94389 atomic_inc(&oldmm->mm_count);
94390 enter_lazy_tlb(oldmm, next);
94391- } else
94392+ } else {
94393 switch_mm(oldmm, mm, next);
94394+ populate_stack();
94395+ }
94396
94397 if (!prev->mm) {
94398 prev->active_mm = NULL;
94399@@ -3124,6 +3126,8 @@ int can_nice(const struct task_struct *p, const int nice)
94400 /* convert nice value [19,-20] to rlimit style value [1,40] */
94401 int nice_rlim = nice_to_rlimit(nice);
94402
94403+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94404+
94405 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
94406 capable(CAP_SYS_NICE));
94407 }
94408@@ -3150,7 +3154,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94409 nice = task_nice(current) + increment;
94410
94411 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
94412- if (increment < 0 && !can_nice(current, nice))
94413+ if (increment < 0 && (!can_nice(current, nice) ||
94414+ gr_handle_chroot_nice()))
94415 return -EPERM;
94416
94417 retval = security_task_setnice(current, nice);
94418@@ -3459,6 +3464,7 @@ recheck:
94419 if (policy != p->policy && !rlim_rtprio)
94420 return -EPERM;
94421
94422+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
94423 /* can't increase priority */
94424 if (attr->sched_priority > p->rt_priority &&
94425 attr->sched_priority > rlim_rtprio)
94426@@ -4946,6 +4952,7 @@ void idle_task_exit(void)
94427
94428 if (mm != &init_mm) {
94429 switch_mm(mm, &init_mm, current);
94430+ populate_stack();
94431 finish_arch_post_lock_switch();
94432 }
94433 mmdrop(mm);
94434@@ -5041,7 +5048,7 @@ static void migrate_tasks(unsigned int dead_cpu)
94435
94436 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
94437
94438-static struct ctl_table sd_ctl_dir[] = {
94439+static ctl_table_no_const sd_ctl_dir[] __read_only = {
94440 {
94441 .procname = "sched_domain",
94442 .mode = 0555,
94443@@ -5058,17 +5065,17 @@ static struct ctl_table sd_ctl_root[] = {
94444 {}
94445 };
94446
94447-static struct ctl_table *sd_alloc_ctl_entry(int n)
94448+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
94449 {
94450- struct ctl_table *entry =
94451+ ctl_table_no_const *entry =
94452 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
94453
94454 return entry;
94455 }
94456
94457-static void sd_free_ctl_entry(struct ctl_table **tablep)
94458+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
94459 {
94460- struct ctl_table *entry;
94461+ ctl_table_no_const *entry;
94462
94463 /*
94464 * In the intermediate directories, both the child directory and
94465@@ -5076,22 +5083,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
94466 * will always be set. In the lowest directory the names are
94467 * static strings and all have proc handlers.
94468 */
94469- for (entry = *tablep; entry->mode; entry++) {
94470- if (entry->child)
94471- sd_free_ctl_entry(&entry->child);
94472+ for (entry = tablep; entry->mode; entry++) {
94473+ if (entry->child) {
94474+ sd_free_ctl_entry(entry->child);
94475+ pax_open_kernel();
94476+ entry->child = NULL;
94477+ pax_close_kernel();
94478+ }
94479 if (entry->proc_handler == NULL)
94480 kfree(entry->procname);
94481 }
94482
94483- kfree(*tablep);
94484- *tablep = NULL;
94485+ kfree(tablep);
94486 }
94487
94488 static int min_load_idx = 0;
94489 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
94490
94491 static void
94492-set_table_entry(struct ctl_table *entry,
94493+set_table_entry(ctl_table_no_const *entry,
94494 const char *procname, void *data, int maxlen,
94495 umode_t mode, proc_handler *proc_handler,
94496 bool load_idx)
94497@@ -5111,7 +5121,7 @@ set_table_entry(struct ctl_table *entry,
94498 static struct ctl_table *
94499 sd_alloc_ctl_domain_table(struct sched_domain *sd)
94500 {
94501- struct ctl_table *table = sd_alloc_ctl_entry(14);
94502+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
94503
94504 if (table == NULL)
94505 return NULL;
94506@@ -5149,9 +5159,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
94507 return table;
94508 }
94509
94510-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
94511+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
94512 {
94513- struct ctl_table *entry, *table;
94514+ ctl_table_no_const *entry, *table;
94515 struct sched_domain *sd;
94516 int domain_num = 0, i;
94517 char buf[32];
94518@@ -5178,11 +5188,13 @@ static struct ctl_table_header *sd_sysctl_header;
94519 static void register_sched_domain_sysctl(void)
94520 {
94521 int i, cpu_num = num_possible_cpus();
94522- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
94523+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
94524 char buf[32];
94525
94526 WARN_ON(sd_ctl_dir[0].child);
94527+ pax_open_kernel();
94528 sd_ctl_dir[0].child = entry;
94529+ pax_close_kernel();
94530
94531 if (entry == NULL)
94532 return;
94533@@ -5205,8 +5217,12 @@ static void unregister_sched_domain_sysctl(void)
94534 if (sd_sysctl_header)
94535 unregister_sysctl_table(sd_sysctl_header);
94536 sd_sysctl_header = NULL;
94537- if (sd_ctl_dir[0].child)
94538- sd_free_ctl_entry(&sd_ctl_dir[0].child);
94539+ if (sd_ctl_dir[0].child) {
94540+ sd_free_ctl_entry(sd_ctl_dir[0].child);
94541+ pax_open_kernel();
94542+ sd_ctl_dir[0].child = NULL;
94543+ pax_close_kernel();
94544+ }
94545 }
94546 #else
94547 static void register_sched_domain_sysctl(void)
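
The sched_domain sysctl hunks above constify the tables (ctl_table_no_const, __read_only), so the two legitimate runtime updates left — wiring up and clearing sd_ctl_dir[0].child — must be bracketed by pax_open_kernel()/pax_close_kernel(). Conceptually, on x86 the pair toggles CR0.WP around the store; a rough sketch only, since the real per-arch implementations also handle preemption and paging details:

	static inline unsigned long pax_open_kernel(void)
	{
		unsigned long cr0 = read_cr0();

		write_cr0(cr0 & ~X86_CR0_WP);	/* permit writes to read-only kernel pages */
		barrier();
		return cr0;
	}

	static inline unsigned long pax_close_kernel(void)
	{
		barrier();
		write_cr0(read_cr0() | X86_CR0_WP);	/* re-seal */
		return 0;
	}
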
94548diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
94549index 241213b..6a64c91 100644
94550--- a/kernel/sched/fair.c
94551+++ b/kernel/sched/fair.c
94552@@ -2092,7 +2092,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
94553
94554 static void reset_ptenuma_scan(struct task_struct *p)
94555 {
94556- ACCESS_ONCE(p->mm->numa_scan_seq)++;
94557+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
94558 p->mm->numa_scan_offset = 0;
94559 }
94560
94561@@ -7656,7 +7656,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
94562 * run_rebalance_domains is triggered when needed from the scheduler tick.
94563 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
94564 */
94565-static void run_rebalance_domains(struct softirq_action *h)
94566+static __latent_entropy void run_rebalance_domains(void)
94567 {
94568 struct rq *this_rq = this_rq();
94569 enum cpu_idle_type idle = this_rq->idle_balance ?
94570diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
94571index dc0f435..ae2e085 100644
94572--- a/kernel/sched/sched.h
94573+++ b/kernel/sched/sched.h
94574@@ -1200,7 +1200,7 @@ struct sched_class {
94575 #ifdef CONFIG_FAIR_GROUP_SCHED
94576 void (*task_move_group) (struct task_struct *p, int on_rq);
94577 #endif
94578-};
94579+} __do_const;
94580
94581 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
94582 {
94583diff --git a/kernel/signal.c b/kernel/signal.c
94584index a390499..ebe9a21 100644
94585--- a/kernel/signal.c
94586+++ b/kernel/signal.c
94587@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
94588
94589 int print_fatal_signals __read_mostly;
94590
94591-static void __user *sig_handler(struct task_struct *t, int sig)
94592+static __sighandler_t sig_handler(struct task_struct *t, int sig)
94593 {
94594 return t->sighand->action[sig - 1].sa.sa_handler;
94595 }
94596
94597-static int sig_handler_ignored(void __user *handler, int sig)
94598+static int sig_handler_ignored(__sighandler_t handler, int sig)
94599 {
94600 /* Is it explicitly or implicitly ignored? */
94601 return handler == SIG_IGN ||
94602@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
94603
94604 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
94605 {
94606- void __user *handler;
94607+ __sighandler_t handler;
94608
94609 handler = sig_handler(t, sig);
94610
94611@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
94612 atomic_inc(&user->sigpending);
94613 rcu_read_unlock();
94614
94615+ if (!override_rlimit)
94616+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
94617+
94618 if (override_rlimit ||
94619 atomic_read(&user->sigpending) <=
94620 task_rlimit(t, RLIMIT_SIGPENDING)) {
94621@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
94622
94623 int unhandled_signal(struct task_struct *tsk, int sig)
94624 {
94625- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
94626+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
94627 if (is_global_init(tsk))
94628 return 1;
94629 if (handler != SIG_IGN && handler != SIG_DFL)
94630@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
94631 }
94632 }
94633
94634+ /* allow glibc communication via tgkill to other threads in our
94635+ thread group */
94636+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
94637+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
94638+ && gr_handle_signal(t, sig))
94639+ return -EPERM;
94640+
94641 return security_task_kill(t, info, sig, 0);
94642 }
94643
94644@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94645 return send_signal(sig, info, p, 1);
94646 }
94647
94648-static int
94649+int
94650 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94651 {
94652 return send_signal(sig, info, t, 0);
94653@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94654 unsigned long int flags;
94655 int ret, blocked, ignored;
94656 struct k_sigaction *action;
94657+ int is_unhandled = 0;
94658
94659 spin_lock_irqsave(&t->sighand->siglock, flags);
94660 action = &t->sighand->action[sig-1];
94661@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94662 }
94663 if (action->sa.sa_handler == SIG_DFL)
94664 t->signal->flags &= ~SIGNAL_UNKILLABLE;
94665+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
94666+ is_unhandled = 1;
94667 ret = specific_send_sig_info(sig, info, t);
94668 spin_unlock_irqrestore(&t->sighand->siglock, flags);
94669
94670+ /* only deal with unhandled signals; Java etc. trigger SIGSEGV during
94671+ normal operation */
94672+ if (is_unhandled) {
94673+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
94674+ gr_handle_crash(t, sig);
94675+ }
94676+
94677 return ret;
94678 }
94679
94680@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94681 ret = check_kill_permission(sig, info, p);
94682 rcu_read_unlock();
94683
94684- if (!ret && sig)
94685+ if (!ret && sig) {
94686 ret = do_send_sig_info(sig, info, p, true);
94687+ if (!ret)
94688+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
94689+ }
94690
94691 return ret;
94692 }
94693@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
94694 int error = -ESRCH;
94695
94696 rcu_read_lock();
94697- p = find_task_by_vpid(pid);
94698+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
94699+ /* allow glibc communication via tgkill to other threads in our
94700+ thread group */
94701+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
94702+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
94703+ p = find_task_by_vpid_unrestricted(pid);
94704+ else
94705+#endif
94706+ p = find_task_by_vpid(pid);
94707 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
94708 error = check_kill_permission(sig, info, p);
94709 /*
94710@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
94711 }
94712 seg = get_fs();
94713 set_fs(KERNEL_DS);
94714- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
94715- (stack_t __force __user *) &uoss,
94716+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
94717+ (stack_t __force_user *) &uoss,
94718 compat_user_stack_pointer());
94719 set_fs(seg);
94720 if (ret >= 0 && uoss_ptr) {
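
The SI_TKILL/SIGRTMIN+1 carve-outs in check_kill_permission() and do_send_specific() above exist because glibc signals sibling threads of its own thread group internally (setxid propagation and the like), and a blanket gr_handle_signal()/chroot-findtask denial would break that. An illustrative userspace call matching the exempted pattern (hypothetical helper, not from the patch):

	#include <signal.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* signal a sibling thread in our own thread group, glibc-style */
	static int nudge_sibling(pid_t tgid, pid_t tid)
	{
		return syscall(SYS_tgkill, tgid, tid, SIGRTMIN + 1);
	}
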
94721diff --git a/kernel/smpboot.c b/kernel/smpboot.c
94722index 40190f2..8861d40 100644
94723--- a/kernel/smpboot.c
94724+++ b/kernel/smpboot.c
94725@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
94726 }
94727 smpboot_unpark_thread(plug_thread, cpu);
94728 }
94729- list_add(&plug_thread->list, &hotplug_threads);
94730+ pax_list_add(&plug_thread->list, &hotplug_threads);
94731 out:
94732 mutex_unlock(&smpboot_threads_lock);
94733 put_online_cpus();
94734@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
94735 {
94736 get_online_cpus();
94737 mutex_lock(&smpboot_threads_lock);
94738- list_del(&plug_thread->list);
94739+ pax_list_del(&plug_thread->list);
94740 smpboot_destroy_threads(plug_thread);
94741 mutex_unlock(&smpboot_threads_lock);
94742 put_online_cpus();
94743diff --git a/kernel/softirq.c b/kernel/softirq.c
94744index 479e443..66d845e1 100644
94745--- a/kernel/softirq.c
94746+++ b/kernel/softirq.c
94747@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
94748 EXPORT_SYMBOL(irq_stat);
94749 #endif
94750
94751-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
94752+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
94753
94754 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
94755
94756@@ -270,7 +270,7 @@ restart:
94757 kstat_incr_softirqs_this_cpu(vec_nr);
94758
94759 trace_softirq_entry(vec_nr);
94760- h->action(h);
94761+ h->action();
94762 trace_softirq_exit(vec_nr);
94763 if (unlikely(prev_count != preempt_count())) {
94764 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
94765@@ -430,7 +430,7 @@ void __raise_softirq_irqoff(unsigned int nr)
94766 or_softirq_pending(1UL << nr);
94767 }
94768
94769-void open_softirq(int nr, void (*action)(struct softirq_action *))
94770+void __init open_softirq(int nr, void (*action)(void))
94771 {
94772 softirq_vec[nr].action = action;
94773 }
94774@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
94775 }
94776 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
94777
94778-static void tasklet_action(struct softirq_action *a)
94779+static void tasklet_action(void)
94780 {
94781 struct tasklet_struct *list;
94782
94783@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
94784 }
94785 }
94786
94787-static void tasklet_hi_action(struct softirq_action *a)
94788+static __latent_entropy void tasklet_hi_action(void)
94789 {
94790 struct tasklet_struct *list;
94791
94792@@ -744,7 +744,7 @@ static struct notifier_block cpu_nfb = {
94793 .notifier_call = cpu_callback
94794 };
94795
94796-static struct smp_hotplug_thread softirq_threads = {
94797+static struct smp_hotplug_thread softirq_threads __read_only = {
94798 .store = &ksoftirqd,
94799 .thread_should_run = ksoftirqd_should_run,
94800 .thread_fn = run_ksoftirqd,
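
Two separate hardenings meet in softirq.c: softirq_vec becomes __read_only, which is only possible because open_softirq() is now __init-only and the handlers never used their struct softirq_action * argument anyway (hence the signature change), and several handlers gain __latent_entropy so the plugin can mix entropy into the pool on each run. The attribute expands along these lines (following the gcc-plugin convention, where the plugin defines the guard itself):

	#ifdef LATENT_ENTROPY_PLUGIN
	#define __latent_entropy	__attribute__((latent_entropy))
	#else
	#define __latent_entropy
	#endif
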
94801diff --git a/kernel/sys.c b/kernel/sys.c
94802index a03d9cd..55dbe9c 100644
94803--- a/kernel/sys.c
94804+++ b/kernel/sys.c
94805@@ -160,6 +160,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
94806 error = -EACCES;
94807 goto out;
94808 }
94809+
94810+ if (gr_handle_chroot_setpriority(p, niceval)) {
94811+ error = -EACCES;
94812+ goto out;
94813+ }
94814+
94815 no_nice = security_task_setnice(p, niceval);
94816 if (no_nice) {
94817 error = no_nice;
94818@@ -365,6 +371,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
94819 goto error;
94820 }
94821
94822+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
94823+ goto error;
94824+
94825+ if (!gid_eq(new->gid, old->gid)) {
94826+ /* make sure we generate a learn log for what will
94827+ end up being a role transition after a full-learning
94828+ policy is generated.
94829+ CAP_SETGID is required to perform a transition;
94830+ we may not log a CAP_SETGID check above, e.g.
94831+ in the case where new rgid = old egid.
94832+ */
94833+ gr_learn_cap(current, new, CAP_SETGID);
94834+ }
94835+
94836 if (rgid != (gid_t) -1 ||
94837 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
94838 new->sgid = new->egid;
94839@@ -400,6 +420,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
94840 old = current_cred();
94841
94842 retval = -EPERM;
94843+
94844+ if (gr_check_group_change(kgid, kgid, kgid))
94845+ goto error;
94846+
94847 if (ns_capable(old->user_ns, CAP_SETGID))
94848 new->gid = new->egid = new->sgid = new->fsgid = kgid;
94849 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
94850@@ -417,7 +441,7 @@ error:
94851 /*
94852 * change the user struct in a credentials set to match the new UID
94853 */
94854-static int set_user(struct cred *new)
94855+int set_user(struct cred *new)
94856 {
94857 struct user_struct *new_user;
94858
94859@@ -497,7 +521,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
94860 goto error;
94861 }
94862
94863+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
94864+ goto error;
94865+
94866 if (!uid_eq(new->uid, old->uid)) {
94867+ /* make sure we generate a learn log for what will
94868+ end up being a role transition after a full-learning
94869+ policy is generated.
94870+ CAP_SETUID is required to perform a transition;
94871+ we may not log a CAP_SETUID check above, e.g.
94872+ in the case where new ruid = old euid.
94873+ */
94874+ gr_learn_cap(current, new, CAP_SETUID);
94875 retval = set_user(new);
94876 if (retval < 0)
94877 goto error;
94878@@ -547,6 +582,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94879 old = current_cred();
94880
94881 retval = -EPERM;
94882+
94883+ if (gr_check_crash_uid(kuid))
94884+ goto error;
94885+ if (gr_check_user_change(kuid, kuid, kuid))
94886+ goto error;
94887+
94888 if (ns_capable(old->user_ns, CAP_SETUID)) {
94889 new->suid = new->uid = kuid;
94890 if (!uid_eq(kuid, old->uid)) {
94891@@ -616,6 +657,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94892 goto error;
94893 }
94894
94895+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
94896+ goto error;
94897+
94898 if (ruid != (uid_t) -1) {
94899 new->uid = kruid;
94900 if (!uid_eq(kruid, old->uid)) {
94901@@ -700,6 +744,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94902 goto error;
94903 }
94904
94905+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
94906+ goto error;
94907+
94908 if (rgid != (gid_t) -1)
94909 new->gid = krgid;
94910 if (egid != (gid_t) -1)
94911@@ -764,12 +811,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94912 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
94913 ns_capable(old->user_ns, CAP_SETUID)) {
94914 if (!uid_eq(kuid, old->fsuid)) {
94915+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
94916+ goto error;
94917+
94918 new->fsuid = kuid;
94919 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
94920 goto change_okay;
94921 }
94922 }
94923
94924+error:
94925 abort_creds(new);
94926 return old_fsuid;
94927
94928@@ -802,12 +853,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
94929 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
94930 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
94931 ns_capable(old->user_ns, CAP_SETGID)) {
94932+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
94933+ goto error;
94934+
94935 if (!gid_eq(kgid, old->fsgid)) {
94936 new->fsgid = kgid;
94937 goto change_okay;
94938 }
94939 }
94940
94941+error:
94942 abort_creds(new);
94943 return old_fsgid;
94944
94945@@ -1185,19 +1240,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
94946 return -EFAULT;
94947
94948 down_read(&uts_sem);
94949- error = __copy_to_user(&name->sysname, &utsname()->sysname,
94950+ error = __copy_to_user(name->sysname, &utsname()->sysname,
94951 __OLD_UTS_LEN);
94952 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
94953- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
94954+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
94955 __OLD_UTS_LEN);
94956 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
94957- error |= __copy_to_user(&name->release, &utsname()->release,
94958+ error |= __copy_to_user(name->release, &utsname()->release,
94959 __OLD_UTS_LEN);
94960 error |= __put_user(0, name->release + __OLD_UTS_LEN);
94961- error |= __copy_to_user(&name->version, &utsname()->version,
94962+ error |= __copy_to_user(name->version, &utsname()->version,
94963 __OLD_UTS_LEN);
94964 error |= __put_user(0, name->version + __OLD_UTS_LEN);
94965- error |= __copy_to_user(&name->machine, &utsname()->machine,
94966+ error |= __copy_to_user(name->machine, &utsname()->machine,
94967 __OLD_UTS_LEN);
94968 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
94969 up_read(&uts_sem);
94970@@ -1398,6 +1453,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
94971 */
94972 new_rlim->rlim_cur = 1;
94973 }
94974+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
94975+ is changed to a lower value. Since tasks can be created by the same
94976+ user in between this limit change and an execve by this task, force
94977+ a recheck only for this task by setting PF_NPROC_EXCEEDED.
94978+ */
94979+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
94980+ tsk->flags |= PF_NPROC_EXCEEDED;
94981 }
94982 if (!retval) {
94983 if (old_rlim)
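
The PF_NPROC_EXCEEDED flag set in do_prlimit() above is consumed on the task's next execve, which re-checks RLIMIT_NPROC only for flagged tasks — closing exactly the window the comment describes. The consuming check sits in fs/exec.c and looks essentially like this in kernels of this vintage (quoted from memory; verify against the tree):

	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (or were never flagged), so drop the flag. */
	current->flags &= ~PF_NPROC_EXCEEDED;
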
94984diff --git a/kernel/sysctl.c b/kernel/sysctl.c
94985index ce410bb..cd276f0 100644
94986--- a/kernel/sysctl.c
94987+++ b/kernel/sysctl.c
94988@@ -94,7 +94,6 @@
94989
94990
94991 #if defined(CONFIG_SYSCTL)
94992-
94993 /* External variables not in a header file. */
94994 extern int max_threads;
94995 extern int suid_dumpable;
94996@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
94997
94998 /* Constants used for minimum and maximum */
94999 #ifdef CONFIG_LOCKUP_DETECTOR
95000-static int sixty = 60;
95001+static int sixty __read_only = 60;
95002 #endif
95003
95004-static int __maybe_unused neg_one = -1;
95005+static int __maybe_unused neg_one __read_only = -1;
95006
95007-static int zero;
95008-static int __maybe_unused one = 1;
95009-static int __maybe_unused two = 2;
95010-static int __maybe_unused four = 4;
95011-static unsigned long one_ul = 1;
95012-static int one_hundred = 100;
95013+static int zero __read_only = 0;
95014+static int __maybe_unused one __read_only = 1;
95015+static int __maybe_unused two __read_only = 2;
95016+static int __maybe_unused three __read_only = 3;
95017+static int __maybe_unused four __read_only = 4;
95018+static unsigned long one_ul __read_only = 1;
95019+static int one_hundred __read_only = 100;
95020 #ifdef CONFIG_PRINTK
95021-static int ten_thousand = 10000;
95022+static int ten_thousand __read_only = 10000;
95023 #endif
95024
95025 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
95026@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
95027 void __user *buffer, size_t *lenp, loff_t *ppos);
95028 #endif
95029
95030-#ifdef CONFIG_PRINTK
95031 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95032 void __user *buffer, size_t *lenp, loff_t *ppos);
95033-#endif
95034
95035 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
95036 void __user *buffer, size_t *lenp, loff_t *ppos);
95037@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
95038
95039 #endif
95040
95041+extern struct ctl_table grsecurity_table[];
95042+
95043 static struct ctl_table kern_table[];
95044 static struct ctl_table vm_table[];
95045 static struct ctl_table fs_table[];
95046@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
95047 int sysctl_legacy_va_layout;
95048 #endif
95049
95050+#ifdef CONFIG_PAX_SOFTMODE
95051+static struct ctl_table pax_table[] = {
95052+ {
95053+ .procname = "softmode",
95054+ .data = &pax_softmode,
95055+ .maxlen = sizeof(unsigned int),
95056+ .mode = 0600,
95057+ .proc_handler = &proc_dointvec,
95058+ },
95059+
95060+ { }
95061+};
95062+#endif
95063+
95064 /* The default sysctl tables: */
95065
95066 static struct ctl_table sysctl_base_table[] = {
95067@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
95068 #endif
95069
95070 static struct ctl_table kern_table[] = {
95071+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
95072+ {
95073+ .procname = "grsecurity",
95074+ .mode = 0500,
95075+ .child = grsecurity_table,
95076+ },
95077+#endif
95078+
95079+#ifdef CONFIG_PAX_SOFTMODE
95080+ {
95081+ .procname = "pax",
95082+ .mode = 0500,
95083+ .child = pax_table,
95084+ },
95085+#endif
95086+
95087 {
95088 .procname = "sched_child_runs_first",
95089 .data = &sysctl_sched_child_runs_first,
95090@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
95091 .data = &modprobe_path,
95092 .maxlen = KMOD_PATH_LEN,
95093 .mode = 0644,
95094- .proc_handler = proc_dostring,
95095+ .proc_handler = proc_dostring_modpriv,
95096 },
95097 {
95098 .procname = "modules_disabled",
95099@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
95100 .extra1 = &zero,
95101 .extra2 = &one,
95102 },
95103+#endif
95104 {
95105 .procname = "kptr_restrict",
95106 .data = &kptr_restrict,
95107 .maxlen = sizeof(int),
95108 .mode = 0644,
95109 .proc_handler = proc_dointvec_minmax_sysadmin,
95110+#ifdef CONFIG_GRKERNSEC_HIDESYM
95111+ .extra1 = &two,
95112+#else
95113 .extra1 = &zero,
95114+#endif
95115 .extra2 = &two,
95116 },
95117-#endif
95118 {
95119 .procname = "ngroups_max",
95120 .data = &ngroups_max,
95121@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
95122 */
95123 {
95124 .procname = "perf_event_paranoid",
95125- .data = &sysctl_perf_event_paranoid,
95126- .maxlen = sizeof(sysctl_perf_event_paranoid),
95127+ .data = &sysctl_perf_event_legitimately_concerned,
95128+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
95129 .mode = 0644,
95130- .proc_handler = proc_dointvec,
95131+ /* go ahead, be a hero */
95132+ .proc_handler = proc_dointvec_minmax_sysadmin,
95133+ .extra1 = &neg_one,
95134+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
95135+ .extra2 = &three,
95136+#else
95137+ .extra2 = &two,
95138+#endif
95139 },
95140 {
95141 .procname = "perf_event_mlock_kb",
95142@@ -1348,6 +1389,13 @@ static struct ctl_table vm_table[] = {
95143 .proc_handler = proc_dointvec_minmax,
95144 .extra1 = &zero,
95145 },
95146+ {
95147+ .procname = "heap_stack_gap",
95148+ .data = &sysctl_heap_stack_gap,
95149+ .maxlen = sizeof(sysctl_heap_stack_gap),
95150+ .mode = 0644,
95151+ .proc_handler = proc_doulongvec_minmax,
95152+ },
95153 #else
95154 {
95155 .procname = "nr_trim_pages",
95156@@ -1830,6 +1878,16 @@ int proc_dostring(struct ctl_table *table, int write,
95157 (char __user *)buffer, lenp, ppos);
95158 }
95159
95160+int proc_dostring_modpriv(struct ctl_table *table, int write,
95161+ void __user *buffer, size_t *lenp, loff_t *ppos)
95162+{
95163+ if (write && !capable(CAP_SYS_MODULE))
95164+ return -EPERM;
95165+
95166+ return _proc_do_string(table->data, table->maxlen, write,
95167+ buffer, lenp, ppos);
95168+}
95169+
95170 static size_t proc_skip_spaces(char **buf)
95171 {
95172 size_t ret;
95173@@ -1935,6 +1993,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
95174 len = strlen(tmp);
95175 if (len > *size)
95176 len = *size;
95177+ if (len > sizeof(tmp))
95178+ len = sizeof(tmp);
95179 if (copy_to_user(*buf, tmp, len))
95180 return -EFAULT;
95181 *size -= len;
95182@@ -2112,7 +2172,7 @@ int proc_dointvec(struct ctl_table *table, int write,
95183 static int proc_taint(struct ctl_table *table, int write,
95184 void __user *buffer, size_t *lenp, loff_t *ppos)
95185 {
95186- struct ctl_table t;
95187+ ctl_table_no_const t;
95188 unsigned long tmptaint = get_taint();
95189 int err;
95190
95191@@ -2140,7 +2200,6 @@ static int proc_taint(struct ctl_table *table, int write,
95192 return err;
95193 }
95194
95195-#ifdef CONFIG_PRINTK
95196 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95197 void __user *buffer, size_t *lenp, loff_t *ppos)
95198 {
95199@@ -2149,7 +2208,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
95200
95201 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
95202 }
95203-#endif
95204
95205 struct do_proc_dointvec_minmax_conv_param {
95206 int *min;
95207@@ -2709,6 +2767,12 @@ int proc_dostring(struct ctl_table *table, int write,
95208 return -ENOSYS;
95209 }
95210
95211+int proc_dostring_modpriv(struct ctl_table *table, int write,
95212+ void __user *buffer, size_t *lenp, loff_t *ppos)
95213+{
95214+ return -ENOSYS;
95215+}
95216+
95217 int proc_dointvec(struct ctl_table *table, int write,
95218 void __user *buffer, size_t *lenp, loff_t *ppos)
95219 {
95220@@ -2765,5 +2829,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
95221 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
95222 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
95223 EXPORT_SYMBOL(proc_dostring);
95224+EXPORT_SYMBOL(proc_dostring_modpriv);
95225 EXPORT_SYMBOL(proc_doulongvec_minmax);
95226 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
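
Net effect of the kptr_restrict hunk above: with CONFIG_GRKERNSEC_HIDESYM the tunable's floor (.extra1) moves from 0 to 2, at which point %pK output is zeroed for every reader, root included; the perf_event_paranoid entry likewise gains a level 3 under PERF_HARDEN that locks unprivileged users out of perf entirely. An illustrative kernel-side snippet (not from the patch):

	#include <linux/printk.h>

	static void show_obj(const void *obj)
	{
		/* with kernel.kptr_restrict >= 2 (the HIDESYM floor), this prints zeroes */
		pr_info("obj at %pK\n", obj);
	}
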
95227diff --git a/kernel/taskstats.c b/kernel/taskstats.c
95228index 21f82c2..c1984e5 100644
95229--- a/kernel/taskstats.c
95230+++ b/kernel/taskstats.c
95231@@ -28,9 +28,12 @@
95232 #include <linux/fs.h>
95233 #include <linux/file.h>
95234 #include <linux/pid_namespace.h>
95235+#include <linux/grsecurity.h>
95236 #include <net/genetlink.h>
95237 #include <linux/atomic.h>
95238
95239+extern int gr_is_taskstats_denied(int pid);
95240+
95241 /*
95242 * Maximum length of a cpumask that can be specified in
95243 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
95244@@ -567,6 +570,9 @@ err:
95245
95246 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
95247 {
95248+ if (gr_is_taskstats_denied(current->pid))
95249+ return -EACCES;
95250+
95251 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
95252 return cmd_attr_register_cpumask(info);
95253 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
95254diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
95255index 1b001ed..55ef9e4 100644
95256--- a/kernel/time/alarmtimer.c
95257+++ b/kernel/time/alarmtimer.c
95258@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
95259 struct platform_device *pdev;
95260 int error = 0;
95261 int i;
95262- struct k_clock alarm_clock = {
95263+ static struct k_clock alarm_clock = {
95264 .clock_getres = alarm_clock_getres,
95265 .clock_get = alarm_clock_get,
95266 .timer_create = alarm_timer_create,
95267diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
95268index bee0c1f..a23fe2d 100644
95269--- a/kernel/time/hrtimer.c
95270+++ b/kernel/time/hrtimer.c
95271@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
95272 local_irq_restore(flags);
95273 }
95274
95275-static void run_hrtimer_softirq(struct softirq_action *h)
95276+static __latent_entropy void run_hrtimer_softirq(void)
95277 {
95278 hrtimer_peek_ahead_timers();
95279 }
95280diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
95281index 0075da7..63cc872 100644
95282--- a/kernel/time/posix-cpu-timers.c
95283+++ b/kernel/time/posix-cpu-timers.c
95284@@ -1449,14 +1449,14 @@ struct k_clock clock_posix_cpu = {
95285
95286 static __init int init_posix_cpu_timers(void)
95287 {
95288- struct k_clock process = {
95289+ static struct k_clock process = {
95290 .clock_getres = process_cpu_clock_getres,
95291 .clock_get = process_cpu_clock_get,
95292 .timer_create = process_cpu_timer_create,
95293 .nsleep = process_cpu_nsleep,
95294 .nsleep_restart = process_cpu_nsleep_restart,
95295 };
95296- struct k_clock thread = {
95297+ static struct k_clock thread = {
95298 .clock_getres = thread_cpu_clock_getres,
95299 .clock_get = thread_cpu_clock_get,
95300 .timer_create = thread_cpu_timer_create,
95301diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
95302index 31ea01f..7fc61ef 100644
95303--- a/kernel/time/posix-timers.c
95304+++ b/kernel/time/posix-timers.c
95305@@ -43,6 +43,7 @@
95306 #include <linux/hash.h>
95307 #include <linux/posix-clock.h>
95308 #include <linux/posix-timers.h>
95309+#include <linux/grsecurity.h>
95310 #include <linux/syscalls.h>
95311 #include <linux/wait.h>
95312 #include <linux/workqueue.h>
95313@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
95314 * which we beg off on and pass to do_sys_settimeofday().
95315 */
95316
95317-static struct k_clock posix_clocks[MAX_CLOCKS];
95318+static struct k_clock *posix_clocks[MAX_CLOCKS];
95319
95320 /*
95321 * These ones are defined below.
95322@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
95323 */
95324 static __init int init_posix_timers(void)
95325 {
95326- struct k_clock clock_realtime = {
95327+ static struct k_clock clock_realtime = {
95328 .clock_getres = hrtimer_get_res,
95329 .clock_get = posix_clock_realtime_get,
95330 .clock_set = posix_clock_realtime_set,
95331@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
95332 .timer_get = common_timer_get,
95333 .timer_del = common_timer_del,
95334 };
95335- struct k_clock clock_monotonic = {
95336+ static struct k_clock clock_monotonic = {
95337 .clock_getres = hrtimer_get_res,
95338 .clock_get = posix_ktime_get_ts,
95339 .nsleep = common_nsleep,
95340@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
95341 .timer_get = common_timer_get,
95342 .timer_del = common_timer_del,
95343 };
95344- struct k_clock clock_monotonic_raw = {
95345+ static struct k_clock clock_monotonic_raw = {
95346 .clock_getres = hrtimer_get_res,
95347 .clock_get = posix_get_monotonic_raw,
95348 };
95349- struct k_clock clock_realtime_coarse = {
95350+ static struct k_clock clock_realtime_coarse = {
95351 .clock_getres = posix_get_coarse_res,
95352 .clock_get = posix_get_realtime_coarse,
95353 };
95354- struct k_clock clock_monotonic_coarse = {
95355+ static struct k_clock clock_monotonic_coarse = {
95356 .clock_getres = posix_get_coarse_res,
95357 .clock_get = posix_get_monotonic_coarse,
95358 };
95359- struct k_clock clock_tai = {
95360+ static struct k_clock clock_tai = {
95361 .clock_getres = hrtimer_get_res,
95362 .clock_get = posix_get_tai,
95363 .nsleep = common_nsleep,
95364@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
95365 .timer_get = common_timer_get,
95366 .timer_del = common_timer_del,
95367 };
95368- struct k_clock clock_boottime = {
95369+ static struct k_clock clock_boottime = {
95370 .clock_getres = hrtimer_get_res,
95371 .clock_get = posix_get_boottime,
95372 .nsleep = common_nsleep,
95373@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
95374 return;
95375 }
95376
95377- posix_clocks[clock_id] = *new_clock;
95378+ posix_clocks[clock_id] = new_clock;
95379 }
95380 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
95381
95382@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
95383 return (id & CLOCKFD_MASK) == CLOCKFD ?
95384 &clock_posix_dynamic : &clock_posix_cpu;
95385
95386- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
95387+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
95388 return NULL;
95389- return &posix_clocks[id];
95390+ return posix_clocks[id];
95391 }
95392
95393 static int common_timer_create(struct k_itimer *new_timer)
95394@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
95395 struct k_clock *kc = clockid_to_kclock(which_clock);
95396 struct k_itimer *new_timer;
95397 int error, new_timer_id;
95398- sigevent_t event;
95399+ sigevent_t event = { };
95400 int it_id_set = IT_ID_NOT_SET;
95401
95402 if (!kc)
95403@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
95404 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
95405 return -EFAULT;
95406
95407+ /* only the CLOCK_REALTIME clock can be set; all other clocks
95408+ have their clock_set fptr set to a nosettime dummy function.
95409+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
95410+ call common_clock_set, which calls do_sys_settimeofday, which
95411+ we hook.
95412+ */
95413+
95414 return kc->clock_set(which_clock, &new_tp);
95415 }
95416
95417diff --git a/kernel/time/time.c b/kernel/time/time.c
95418index 2c85b77..6530536 100644
95419--- a/kernel/time/time.c
95420+++ b/kernel/time/time.c
95421@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
95422 return error;
95423
95424 if (tz) {
95425+ /* we already log in do_settimeofday, called below, so don't log twice
95426+ */
95427+ if (!tv)
95428+ gr_log_timechange();
95429+
95430 sys_tz = *tz;
95431 update_vsyscall_tz();
95432 if (firsttime) {
95433diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
95434index 91db941..a371671 100644
95435--- a/kernel/time/timekeeping.c
95436+++ b/kernel/time/timekeeping.c
95437@@ -15,6 +15,7 @@
95438 #include <linux/init.h>
95439 #include <linux/mm.h>
95440 #include <linux/sched.h>
95441+#include <linux/grsecurity.h>
95442 #include <linux/syscore_ops.h>
95443 #include <linux/clocksource.h>
95444 #include <linux/jiffies.h>
95445@@ -802,6 +803,8 @@ int do_settimeofday64(const struct timespec64 *ts)
95446 if (!timespec64_valid_strict(ts))
95447 return -EINVAL;
95448
95449+ gr_log_timechange();
95450+
95451 raw_spin_lock_irqsave(&timekeeper_lock, flags);
95452 write_seqcount_begin(&tk_core.seq);
95453
95454diff --git a/kernel/time/timer.c b/kernel/time/timer.c
95455index 2d3f5c5..7ed7dc5 100644
95456--- a/kernel/time/timer.c
95457+++ b/kernel/time/timer.c
95458@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
95459 /*
95460 * This function runs timers and the timer-tq in bottom half context.
95461 */
95462-static void run_timer_softirq(struct softirq_action *h)
95463+static __latent_entropy void run_timer_softirq(void)
95464 {
95465 struct tvec_base *base = __this_cpu_read(tvec_bases);
95466
95467@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
95468 *
95469 * In all cases the return value is guaranteed to be non-negative.
95470 */
95471-signed long __sched schedule_timeout(signed long timeout)
95472+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
95473 {
95474 struct timer_list timer;
95475 unsigned long expire;
95476diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
95477index 61ed862..3b52c65 100644
95478--- a/kernel/time/timer_list.c
95479+++ b/kernel/time/timer_list.c
95480@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
95481
95482 static void print_name_offset(struct seq_file *m, void *sym)
95483 {
95484+#ifdef CONFIG_GRKERNSEC_HIDESYM
95485+ SEQ_printf(m, "<%p>", NULL);
95486+#else
95487 char symname[KSYM_NAME_LEN];
95488
95489 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
95490 SEQ_printf(m, "<%pK>", sym);
95491 else
95492 SEQ_printf(m, "%s", symname);
95493+#endif
95494 }
95495
95496 static void
95497@@ -119,7 +123,11 @@ next_one:
95498 static void
95499 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
95500 {
95501+#ifdef CONFIG_GRKERNSEC_HIDESYM
95502+ SEQ_printf(m, " .base: %p\n", NULL);
95503+#else
95504 SEQ_printf(m, " .base: %pK\n", base);
95505+#endif
95506 SEQ_printf(m, " .index: %d\n",
95507 base->index);
95508 SEQ_printf(m, " .resolution: %Lu nsecs\n",
95509@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
95510 {
95511 struct proc_dir_entry *pe;
95512
95513+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95514+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
95515+#else
95516 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
95517+#endif
95518 if (!pe)
95519 return -ENOMEM;
95520 return 0;
95521diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
95522index 1fb08f2..ca4bb1e 100644
95523--- a/kernel/time/timer_stats.c
95524+++ b/kernel/time/timer_stats.c
95525@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
95526 static unsigned long nr_entries;
95527 static struct entry entries[MAX_ENTRIES];
95528
95529-static atomic_t overflow_count;
95530+static atomic_unchecked_t overflow_count;
95531
95532 /*
95533 * The entries are in a hash-table, for fast lookup:
95534@@ -140,7 +140,7 @@ static void reset_entries(void)
95535 nr_entries = 0;
95536 memset(entries, 0, sizeof(entries));
95537 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
95538- atomic_set(&overflow_count, 0);
95539+ atomic_set_unchecked(&overflow_count, 0);
95540 }
95541
95542 static struct entry *alloc_entry(void)
95543@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95544 if (likely(entry))
95545 entry->count++;
95546 else
95547- atomic_inc(&overflow_count);
95548+ atomic_inc_unchecked(&overflow_count);
95549
95550 out_unlock:
95551 raw_spin_unlock_irqrestore(lock, flags);
95552@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
95553
95554 static void print_name_offset(struct seq_file *m, unsigned long addr)
95555 {
95556+#ifdef CONFIG_GRKERNSEC_HIDESYM
95557+ seq_printf(m, "<%p>", NULL);
95558+#else
95559 char symname[KSYM_NAME_LEN];
95560
95561 if (lookup_symbol_name(addr, symname) < 0)
95562- seq_printf(m, "<%p>", (void *)addr);
95563+ seq_printf(m, "<%pK>", (void *)addr);
95564 else
95565 seq_printf(m, "%s", symname);
95566+#endif
95567 }
95568
95569 static int tstats_show(struct seq_file *m, void *v)
95570@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
95571
95572 seq_puts(m, "Timer Stats Version: v0.3\n");
95573 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
95574- if (atomic_read(&overflow_count))
95575- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
95576+ if (atomic_read_unchecked(&overflow_count))
95577+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
95578 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
95579
95580 for (i = 0; i < nr_entries; i++) {
95581@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
95582 {
95583 struct proc_dir_entry *pe;
95584
95585+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95586+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
95587+#else
95588 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
95589+#endif
95590 if (!pe)
95591 return -ENOMEM;
95592 return 0;
95593diff --git a/kernel/torture.c b/kernel/torture.c
95594index dd70993..0bf694b 100644
95595--- a/kernel/torture.c
95596+++ b/kernel/torture.c
95597@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
95598 mutex_lock(&fullstop_mutex);
95599 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
95600 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
95601- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
95602+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
95603 } else {
95604 pr_warn("Concurrent rmmod and shutdown illegal!\n");
95605 }
95606@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
95607 if (!torture_must_stop()) {
95608 if (stutter > 1) {
95609 schedule_timeout_interruptible(stutter - 1);
95610- ACCESS_ONCE(stutter_pause_test) = 2;
95611+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
95612 }
95613 schedule_timeout_interruptible(1);
95614- ACCESS_ONCE(stutter_pause_test) = 1;
95615+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
95616 }
95617 if (!torture_must_stop())
95618 schedule_timeout_interruptible(stutter);
95619- ACCESS_ONCE(stutter_pause_test) = 0;
95620+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
95621 torture_shutdown_absorb("torture_stutter");
95622 } while (!torture_must_stop());
95623 torture_kthread_stopping("torture_stutter");
95624@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
95625 schedule_timeout_uninterruptible(10);
95626 return true;
95627 }
95628- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
95629+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
95630 mutex_unlock(&fullstop_mutex);
95631 torture_shutdown_cleanup();
95632 torture_shuffle_cleanup();
95633diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
95634index 483cecf..ac46091 100644
95635--- a/kernel/trace/blktrace.c
95636+++ b/kernel/trace/blktrace.c
95637@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
95638 struct blk_trace *bt = filp->private_data;
95639 char buf[16];
95640
95641- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
95642+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
95643
95644 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
95645 }
95646@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
95647 return 1;
95648
95649 bt = buf->chan->private_data;
95650- atomic_inc(&bt->dropped);
95651+ atomic_inc_unchecked(&bt->dropped);
95652 return 0;
95653 }
95654
95655@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
95656
95657 bt->dir = dir;
95658 bt->dev = dev;
95659- atomic_set(&bt->dropped, 0);
95660+ atomic_set_unchecked(&bt->dropped, 0);
95661 INIT_LIST_HEAD(&bt->running_list);
95662
95663 ret = -EIO;
95664diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
95665index 4f22802..bd268b1 100644
95666--- a/kernel/trace/ftrace.c
95667+++ b/kernel/trace/ftrace.c
95668@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
95669 if (unlikely(ftrace_disabled))
95670 return 0;
95671
95672+ ret = ftrace_arch_code_modify_prepare();
95673+ FTRACE_WARN_ON(ret);
95674+ if (ret)
95675+ return 0;
95676+
95677 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
95678+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
95679 if (ret) {
95680 ftrace_bug(ret, rec);
95681- return 0;
95682 }
95683- return 1;
95684+ return ret ? 0 : 1;
95685 }
95686
95687 /*
95688@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
95689 if (!count)
95690 return 0;
95691
95692+ pax_open_kernel();
95693 sort(start, count, sizeof(*start),
95694 ftrace_cmp_ips, ftrace_swap_ips);
95695+ pax_close_kernel();
95696
95697 start_pg = ftrace_allocate_pages(count);
95698 if (!start_pg)
95699@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
95700
95701 if (t->ret_stack == NULL) {
95702 atomic_set(&t->tracing_graph_pause, 0);
95703- atomic_set(&t->trace_overrun, 0);
95704+ atomic_set_unchecked(&t->trace_overrun, 0);
95705 t->curr_ret_stack = -1;
95706 /* Make sure the tasks see the -1 first: */
95707 smp_wmb();
95708@@ -5876,7 +5883,7 @@ static void
95709 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
95710 {
95711 atomic_set(&t->tracing_graph_pause, 0);
95712- atomic_set(&t->trace_overrun, 0);
95713+ atomic_set_unchecked(&t->trace_overrun, 0);
95714 t->ftrace_timestamp = 0;
95715 /* make curr_ret_stack visible before we add the ret_stack */
95716 smp_wmb();
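
ftrace_code_disable() rewrites kernel text, which KERNEXEC keeps read-only, so the hunk above brackets ftrace_make_nop() with the arch modify hooks and folds their result into the return value. On stock x86 those hooks are where text protection is dropped and restored (for orientation; from arch/x86/kernel/ftrace.c of this era):

	int ftrace_arch_code_modify_prepare(void)
	{
		set_kernel_text_rw();
		set_all_modules_text_rw();
		return 0;
	}

	int ftrace_arch_code_modify_post_process(void)
	{
		set_all_modules_text_ro();
		set_kernel_text_ro();
		return 0;
	}
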
95717diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
95718index 922048a..bb71a55 100644
95719--- a/kernel/trace/ring_buffer.c
95720+++ b/kernel/trace/ring_buffer.c
95721@@ -348,9 +348,9 @@ struct buffer_data_page {
95722 */
95723 struct buffer_page {
95724 struct list_head list; /* list of buffer pages */
95725- local_t write; /* index for next write */
95726+ local_unchecked_t write; /* index for next write */
95727 unsigned read; /* index for next read */
95728- local_t entries; /* entries on this page */
95729+ local_unchecked_t entries; /* entries on this page */
95730 unsigned long real_end; /* real end of data */
95731 struct buffer_data_page *page; /* Actual data page */
95732 };
95733@@ -471,11 +471,11 @@ struct ring_buffer_per_cpu {
95734 unsigned long last_overrun;
95735 local_t entries_bytes;
95736 local_t entries;
95737- local_t overrun;
95738- local_t commit_overrun;
95739- local_t dropped_events;
95740+ local_unchecked_t overrun;
95741+ local_unchecked_t commit_overrun;
95742+ local_unchecked_t dropped_events;
95743 local_t committing;
95744- local_t commits;
95745+ local_unchecked_t commits;
95746 unsigned long read;
95747 unsigned long read_bytes;
95748 u64 write_stamp;
95749@@ -1045,8 +1045,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
95750 *
95751 * We add a counter to the write field to denote this.
95752 */
95753- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
95754- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
95755+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
95756+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
95757
95758 /*
95759 * Just make sure we have seen our old_write and synchronize
95760@@ -1074,8 +1074,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
95761 * cmpxchg to only update if an interrupt did not already
95762 * do it for us. If the cmpxchg fails, we don't care.
95763 */
95764- (void)local_cmpxchg(&next_page->write, old_write, val);
95765- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
95766+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
95767+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
95768
95769 /*
95770 * No need to worry about races with clearing out the commit.
95771@@ -1443,12 +1443,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
95772
95773 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
95774 {
95775- return local_read(&bpage->entries) & RB_WRITE_MASK;
95776+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
95777 }
95778
95779 static inline unsigned long rb_page_write(struct buffer_page *bpage)
95780 {
95781- return local_read(&bpage->write) & RB_WRITE_MASK;
95782+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
95783 }
95784
95785 static int
95786@@ -1543,7 +1543,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
95787 * bytes consumed in ring buffer from here.
95788 * Increment overrun to account for the lost events.
95789 */
95790- local_add(page_entries, &cpu_buffer->overrun);
95791+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
95792 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
95793 }
95794
95795@@ -2105,7 +2105,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
95796 * it is our responsibility to update
95797 * the counters.
95798 */
95799- local_add(entries, &cpu_buffer->overrun);
95800+ local_add_unchecked(entries, &cpu_buffer->overrun);
95801 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
95802
95803 /*
95804@@ -2255,7 +2255,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95805 if (tail == BUF_PAGE_SIZE)
95806 tail_page->real_end = 0;
95807
95808- local_sub(length, &tail_page->write);
95809+ local_sub_unchecked(length, &tail_page->write);
95810 return;
95811 }
95812
95813@@ -2290,7 +2290,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95814 rb_event_set_padding(event);
95815
95816 /* Set the write back to the previous setting */
95817- local_sub(length, &tail_page->write);
95818+ local_sub_unchecked(length, &tail_page->write);
95819 return;
95820 }
95821
95822@@ -2302,7 +2302,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
95823
95824 /* Set write to end of buffer */
95825 length = (tail + length) - BUF_PAGE_SIZE;
95826- local_sub(length, &tail_page->write);
95827+ local_sub_unchecked(length, &tail_page->write);
95828 }
95829
95830 /*
95831@@ -2328,7 +2328,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
95832 * about it.
95833 */
95834 if (unlikely(next_page == commit_page)) {
95835- local_inc(&cpu_buffer->commit_overrun);
95836+ local_inc_unchecked(&cpu_buffer->commit_overrun);
95837 goto out_reset;
95838 }
95839
95840@@ -2358,7 +2358,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
95841 * this is easy, just stop here.
95842 */
95843 if (!(buffer->flags & RB_FL_OVERWRITE)) {
95844- local_inc(&cpu_buffer->dropped_events);
95845+ local_inc_unchecked(&cpu_buffer->dropped_events);
95846 goto out_reset;
95847 }
95848
95849@@ -2384,7 +2384,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
95850 cpu_buffer->tail_page) &&
95851 (cpu_buffer->commit_page ==
95852 cpu_buffer->reader_page))) {
95853- local_inc(&cpu_buffer->commit_overrun);
95854+ local_inc_unchecked(&cpu_buffer->commit_overrun);
95855 goto out_reset;
95856 }
95857 }
95858@@ -2432,7 +2432,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
95859 length += RB_LEN_TIME_EXTEND;
95860
95861 tail_page = cpu_buffer->tail_page;
95862- write = local_add_return(length, &tail_page->write);
95863+ write = local_add_return_unchecked(length, &tail_page->write);
95864
95865 /* set write to only the index of the write */
95866 write &= RB_WRITE_MASK;
95867@@ -2456,7 +2456,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
95868 kmemcheck_annotate_bitfield(event, bitfield);
95869 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
95870
95871- local_inc(&tail_page->entries);
95872+ local_inc_unchecked(&tail_page->entries);
95873
95874 /*
95875 * If this is the first commit on the page, then update
95876@@ -2489,7 +2489,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
95877
95878 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
95879 unsigned long write_mask =
95880- local_read(&bpage->write) & ~RB_WRITE_MASK;
95881+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
95882 unsigned long event_length = rb_event_length(event);
95883 /*
95884 * This is on the tail page. It is possible that
95885@@ -2499,7 +2499,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
95886 */
95887 old_index += write_mask;
95888 new_index += write_mask;
95889- index = local_cmpxchg(&bpage->write, old_index, new_index);
95890+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
95891 if (index == old_index) {
95892 /* update counters */
95893 local_sub(event_length, &cpu_buffer->entries_bytes);
95894@@ -2514,7 +2514,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
95895 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
95896 {
95897 local_inc(&cpu_buffer->committing);
95898- local_inc(&cpu_buffer->commits);
95899+ local_inc_unchecked(&cpu_buffer->commits);
95900 }
95901
95902 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
95903@@ -2526,7 +2526,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
95904 return;
95905
95906 again:
95907- commits = local_read(&cpu_buffer->commits);
95908+ commits = local_read_unchecked(&cpu_buffer->commits);
95909 /* synchronize with interrupts */
95910 barrier();
95911 if (local_read(&cpu_buffer->committing) == 1)
95912@@ -2542,7 +2542,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
95913 * updating of the commit page and the clearing of the
95914 * committing counter.
95915 */
95916- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
95917+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
95918 !local_read(&cpu_buffer->committing)) {
95919 local_inc(&cpu_buffer->committing);
95920 goto again;
95921@@ -2572,7 +2572,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
95922 barrier();
95923 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
95924 local_dec(&cpu_buffer->committing);
95925- local_dec(&cpu_buffer->commits);
95926+ local_dec_unchecked(&cpu_buffer->commits);
95927 return NULL;
95928 }
95929 #endif
95930@@ -2901,7 +2901,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95931
95932 /* Do the likely case first */
95933 if (likely(bpage->page == (void *)addr)) {
95934- local_dec(&bpage->entries);
95935+ local_dec_unchecked(&bpage->entries);
95936 return;
95937 }
95938
95939@@ -2913,7 +2913,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95940 start = bpage;
95941 do {
95942 if (bpage->page == (void *)addr) {
95943- local_dec(&bpage->entries);
95944+ local_dec_unchecked(&bpage->entries);
95945 return;
95946 }
95947 rb_inc_page(cpu_buffer, &bpage);
95948@@ -3197,7 +3197,7 @@ static inline unsigned long
95949 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
95950 {
95951 return local_read(&cpu_buffer->entries) -
95952- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
95953+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
95954 }
95955
95956 /**
95957@@ -3286,7 +3286,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
95958 return 0;
95959
95960 cpu_buffer = buffer->buffers[cpu];
95961- ret = local_read(&cpu_buffer->overrun);
95962+ ret = local_read_unchecked(&cpu_buffer->overrun);
95963
95964 return ret;
95965 }
95966@@ -3309,7 +3309,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
95967 return 0;
95968
95969 cpu_buffer = buffer->buffers[cpu];
95970- ret = local_read(&cpu_buffer->commit_overrun);
95971+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
95972
95973 return ret;
95974 }
95975@@ -3331,7 +3331,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
95976 return 0;
95977
95978 cpu_buffer = buffer->buffers[cpu];
95979- ret = local_read(&cpu_buffer->dropped_events);
95980+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
95981
95982 return ret;
95983 }
95984@@ -3394,7 +3394,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
95985 /* if you care about this being correct, lock the buffer */
95986 for_each_buffer_cpu(buffer, cpu) {
95987 cpu_buffer = buffer->buffers[cpu];
95988- overruns += local_read(&cpu_buffer->overrun);
95989+ overruns += local_read_unchecked(&cpu_buffer->overrun);
95990 }
95991
95992 return overruns;
95993@@ -3565,8 +3565,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95994 /*
95995 * Reset the reader page to size zero.
95996 */
95997- local_set(&cpu_buffer->reader_page->write, 0);
95998- local_set(&cpu_buffer->reader_page->entries, 0);
95999+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96000+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96001 local_set(&cpu_buffer->reader_page->page->commit, 0);
96002 cpu_buffer->reader_page->real_end = 0;
96003
96004@@ -3600,7 +3600,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
96005 * want to compare with the last_overrun.
96006 */
96007 smp_mb();
96008- overwrite = local_read(&(cpu_buffer->overrun));
96009+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
96010
96011 /*
96012 * Here's the tricky part.
96013@@ -4172,8 +4172,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96014
96015 cpu_buffer->head_page
96016 = list_entry(cpu_buffer->pages, struct buffer_page, list);
96017- local_set(&cpu_buffer->head_page->write, 0);
96018- local_set(&cpu_buffer->head_page->entries, 0);
96019+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
96020+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
96021 local_set(&cpu_buffer->head_page->page->commit, 0);
96022
96023 cpu_buffer->head_page->read = 0;
96024@@ -4183,18 +4183,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
96025
96026 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
96027 INIT_LIST_HEAD(&cpu_buffer->new_pages);
96028- local_set(&cpu_buffer->reader_page->write, 0);
96029- local_set(&cpu_buffer->reader_page->entries, 0);
96030+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
96031+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
96032 local_set(&cpu_buffer->reader_page->page->commit, 0);
96033 cpu_buffer->reader_page->read = 0;
96034
96035 local_set(&cpu_buffer->entries_bytes, 0);
96036- local_set(&cpu_buffer->overrun, 0);
96037- local_set(&cpu_buffer->commit_overrun, 0);
96038- local_set(&cpu_buffer->dropped_events, 0);
96039+ local_set_unchecked(&cpu_buffer->overrun, 0);
96040+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
96041+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
96042 local_set(&cpu_buffer->entries, 0);
96043 local_set(&cpu_buffer->committing, 0);
96044- local_set(&cpu_buffer->commits, 0);
96045+ local_set_unchecked(&cpu_buffer->commits, 0);
96046 cpu_buffer->read = 0;
96047 cpu_buffer->read_bytes = 0;
96048
96049@@ -4595,8 +4595,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
96050 rb_init_page(bpage);
96051 bpage = reader->page;
96052 reader->page = *data_page;
96053- local_set(&reader->write, 0);
96054- local_set(&reader->entries, 0);
96055+ local_set_unchecked(&reader->write, 0);
96056+ local_set_unchecked(&reader->entries, 0);
96057 reader->read = 0;
96058 *data_page = bpage;
96059
96060diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
96061index 62c6506..5c25989 100644
96062--- a/kernel/trace/trace.c
96063+++ b/kernel/trace/trace.c
96064@@ -3500,7 +3500,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
96065 return 0;
96066 }
96067
96068-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
96069+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
96070 {
96071 /* do nothing if flag is already set */
96072 if (!!(trace_flags & mask) == !!enabled)
96073diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
96074index dd8205a..1aae87a 100644
96075--- a/kernel/trace/trace.h
96076+++ b/kernel/trace/trace.h
96077@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
96078 void trace_printk_init_buffers(void);
96079 void trace_printk_start_comm(void);
96080 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
96081-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
96082+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
96083
96084 /*
96085 * Normal trace_printk() and friends allocates special buffers
96086diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
96087index 57b67b1..66082a9 100644
96088--- a/kernel/trace/trace_clock.c
96089+++ b/kernel/trace/trace_clock.c
96090@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
96091 return now;
96092 }
96093
96094-static atomic64_t trace_counter;
96095+static atomic64_unchecked_t trace_counter;
96096
96097 /*
96098 * trace_clock_counter(): simply an atomic counter.
96099@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
96100 */
96101 u64 notrace trace_clock_counter(void)
96102 {
96103- return atomic64_add_return(1, &trace_counter);
96104+ return atomic64_inc_return_unchecked(&trace_counter);
96105 }
96106diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
96107index a9c10a3..1864f6b 100644
96108--- a/kernel/trace/trace_events.c
96109+++ b/kernel/trace/trace_events.c
96110@@ -1762,7 +1762,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
96111 return 0;
96112 }
96113
96114-struct ftrace_module_file_ops;
96115 static void __add_event_to_tracers(struct ftrace_event_call *call);
96116
96117 /* Add an additional event_call dynamically */
96118diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
96119index b6fce36..d9f11a3 100644
96120--- a/kernel/trace/trace_functions_graph.c
96121+++ b/kernel/trace/trace_functions_graph.c
96122@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
96123
96124 /* The return trace stack is full */
96125 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
96126- atomic_inc(&current->trace_overrun);
96127+ atomic_inc_unchecked(&current->trace_overrun);
96128 return -EBUSY;
96129 }
96130
96131@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
96132 *ret = current->ret_stack[index].ret;
96133 trace->func = current->ret_stack[index].func;
96134 trace->calltime = current->ret_stack[index].calltime;
96135- trace->overrun = atomic_read(&current->trace_overrun);
96136+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
96137 trace->depth = index;
96138 }
96139
96140diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
96141index 7a9ba62..2e0e4a1 100644
96142--- a/kernel/trace/trace_mmiotrace.c
96143+++ b/kernel/trace/trace_mmiotrace.c
96144@@ -24,7 +24,7 @@ struct header_iter {
96145 static struct trace_array *mmio_trace_array;
96146 static bool overrun_detected;
96147 static unsigned long prev_overruns;
96148-static atomic_t dropped_count;
96149+static atomic_unchecked_t dropped_count;
96150
96151 static void mmio_reset_data(struct trace_array *tr)
96152 {
96153@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
96154
96155 static unsigned long count_overruns(struct trace_iterator *iter)
96156 {
96157- unsigned long cnt = atomic_xchg(&dropped_count, 0);
96158+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
96159 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
96160
96161 if (over > prev_overruns)
96162@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
96163 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
96164 sizeof(*entry), 0, pc);
96165 if (!event) {
96166- atomic_inc(&dropped_count);
96167+ atomic_inc_unchecked(&dropped_count);
96168 return;
96169 }
96170 entry = ring_buffer_event_data(event);
96171@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
96172 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
96173 sizeof(*entry), 0, pc);
96174 if (!event) {
96175- atomic_inc(&dropped_count);
96176+ atomic_inc_unchecked(&dropped_count);
96177 return;
96178 }
96179 entry = ring_buffer_event_data(event);
96180diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
96181index 692bf71..6d9a9cd 100644
96182--- a/kernel/trace/trace_output.c
96183+++ b/kernel/trace/trace_output.c
96184@@ -751,14 +751,16 @@ int register_ftrace_event(struct trace_event *event)
96185 goto out;
96186 }
96187
96188+ pax_open_kernel();
96189 if (event->funcs->trace == NULL)
96190- event->funcs->trace = trace_nop_print;
96191+ *(void **)&event->funcs->trace = trace_nop_print;
96192 if (event->funcs->raw == NULL)
96193- event->funcs->raw = trace_nop_print;
96194+ *(void **)&event->funcs->raw = trace_nop_print;
96195 if (event->funcs->hex == NULL)
96196- event->funcs->hex = trace_nop_print;
96197+ *(void **)&event->funcs->hex = trace_nop_print;
96198 if (event->funcs->binary == NULL)
96199- event->funcs->binary = trace_nop_print;
96200+ *(void **)&event->funcs->binary = trace_nop_print;
96201+ pax_close_kernel();
96202
96203 key = event->type & (EVENT_HASHSIZE - 1);
96204
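
pax_open_kernel()/pax_close_kernel(), used above to patch the constified trace_event function table through `*(void **)&` casts, bound a short window in which otherwise read-only kernel data is writable. A rough userspace analogue with mprotect(), assuming nothing about the kernel-side mechanism beyond the open/write/close discipline:

/*
 * Userspace analogue of the pax_open_kernel()/pax_close_kernel() write
 * window: data lives in a read-only mapping and is made writable only
 * for the duration of the update.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        long *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (table == MAP_FAILED)
                return 1;

        table[0] = 42;
        mprotect(table, pagesz, PROT_READ);              /* "constify" it */

        mprotect(table, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        table[0] = 1337;                                 /* patch the entry */
        mprotect(table, pagesz, PROT_READ);              /* pax_close_kernel() */

        printf("table[0] = %ld\n", table[0]);
        return 0;
}

The point of the discipline is that an attacker with a stray-write primitive finds the table read-only at almost every instant; only legitimate, bracketed updates go through.
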
96205diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
96206index e694c9f..6775a38 100644
96207--- a/kernel/trace/trace_seq.c
96208+++ b/kernel/trace/trace_seq.c
96209@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
96210 return 0;
96211 }
96212
96213- seq_buf_path(&s->seq, path, "\n");
96214+ seq_buf_path(&s->seq, path, "\n\\");
96215
96216 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
96217 s->seq.len = save_len;
96218diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
96219index c3e4fcf..ef6cc43 100644
96220--- a/kernel/trace/trace_stack.c
96221+++ b/kernel/trace/trace_stack.c
96222@@ -88,7 +88,7 @@ check_stack(unsigned long ip, unsigned long *stack)
96223 return;
96224
96225 /* we do not handle interrupt stacks yet */
96226- if (!object_is_on_stack(stack))
96227+ if (!object_starts_on_stack(stack))
96228 return;
96229
96230 local_irq_save(flags);
96231diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
96232index f97f6e3..d367b48 100644
96233--- a/kernel/trace/trace_syscalls.c
96234+++ b/kernel/trace/trace_syscalls.c
96235@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
96236 int num;
96237
96238 num = ((struct syscall_metadata *)call->data)->syscall_nr;
96239+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
96240+ return -EINVAL;
96241
96242 mutex_lock(&syscall_trace_lock);
96243 if (!sys_perf_refcount_enter)
96244@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
96245 int num;
96246
96247 num = ((struct syscall_metadata *)call->data)->syscall_nr;
96248+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
96249+ return;
96250
96251 mutex_lock(&syscall_trace_lock);
96252 sys_perf_refcount_enter--;
96253@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
96254 int num;
96255
96256 num = ((struct syscall_metadata *)call->data)->syscall_nr;
96257+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
96258+ return -EINVAL;
96259
96260 mutex_lock(&syscall_trace_lock);
96261 if (!sys_perf_refcount_exit)
96262@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
96263 int num;
96264
96265 num = ((struct syscall_metadata *)call->data)->syscall_nr;
96266+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
96267+ return;
96268
96269 mutex_lock(&syscall_trace_lock);
96270 sys_perf_refcount_exit--;
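
All four trace_syscalls.c hunks add the same guard: validate the metadata's syscall_nr before it indexes the perf refcount and bitmap state, since an unhooked or bogus entry could carry a negative or out-of-range number. The pattern in isolation, with NR_SYSCALLS and the table as stand-ins:

/* Sketch of the validate-before-index pattern added above. */
#include <stdio.h>

#define NR_SYSCALLS 64
static int refcount[NR_SYSCALLS];

static int enable_trace(int num)
{
        if (num < 0 || num >= NR_SYSCALLS)      /* reject unhooked entries */
                return -1;                      /* -EINVAL in the kernel */
        refcount[num]++;
        return 0;
}

int main(void)
{
        printf("%d %d\n", enable_trace(3), enable_trace(-1));
        return 0;
}
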
96271diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
96272index 4109f83..fe1f830 100644
96273--- a/kernel/user_namespace.c
96274+++ b/kernel/user_namespace.c
96275@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
96276 !kgid_has_mapping(parent_ns, group))
96277 return -EPERM;
96278
96279+#ifdef CONFIG_GRKERNSEC
96280+ /*
96281+ * This doesn't really inspire confidence:
96282+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
96283+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
96284+ * Increases kernel attack surface in areas developers
96285+ * previously cared little about ("low importance due
96286+ * to requiring "root" capability")
96287+ * To be removed when this code receives *proper* review
96288+ */
96289+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
96290+ !capable(CAP_SETGID))
96291+ return -EPERM;
96292+#endif
96293+
96294 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
96295 if (!ns)
96296 return -ENOMEM;
96297@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
96298 if (atomic_read(&current->mm->mm_users) > 1)
96299 return -EINVAL;
96300
96301- if (current->fs->users != 1)
96302+ if (atomic_read(&current->fs->users) != 1)
96303 return -EINVAL;
96304
96305 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
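
The CONFIG_GRKERNSEC block above turns unprivileged user-namespace creation back into a privileged operation by requiring all three of CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID. Its shape is just a conjunction of capability checks in front of the allocation; a toy model, with capable() reduced to a stub:

/* Model of the conjunction-of-capabilities gate; the capable() oracle
 * is a simplified stand-in. */
#include <stdbool.h>
#include <stdio.h>

enum cap { CAP_SYS_ADMIN, CAP_SETUID, CAP_SETGID };

static bool capable(enum cap c)
{
        (void)c;
        return false;   /* pretend we are an unprivileged task */
}

static int create_user_ns(void)
{
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
            !capable(CAP_SETGID))
                return -1;      /* -EPERM in the kernel */
        return 0;               /* ...allocate the namespace here */
}

int main(void)
{
        printf("create_user_ns() -> %d\n", create_user_ns());
        return 0;
}
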
96306diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
96307index c8eac43..4b5f08f 100644
96308--- a/kernel/utsname_sysctl.c
96309+++ b/kernel/utsname_sysctl.c
96310@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
96311 static int proc_do_uts_string(struct ctl_table *table, int write,
96312 void __user *buffer, size_t *lenp, loff_t *ppos)
96313 {
96314- struct ctl_table uts_table;
96315+ ctl_table_no_const uts_table;
96316 int r;
96317 memcpy(&uts_table, table, sizeof(uts_table));
96318 uts_table.data = get_uts(table, write);
96319diff --git a/kernel/watchdog.c b/kernel/watchdog.c
96320index 3174bf8..3553520 100644
96321--- a/kernel/watchdog.c
96322+++ b/kernel/watchdog.c
96323@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
96324 static void watchdog_nmi_disable(unsigned int cpu) { return; }
96325 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
96326
96327-static struct smp_hotplug_thread watchdog_threads = {
96328+static struct smp_hotplug_thread watchdog_threads __read_only = {
96329 .store = &softlockup_watchdog,
96330 .thread_should_run = watchdog_should_run,
96331 .thread_fn = watchdog,
96332diff --git a/kernel/workqueue.c b/kernel/workqueue.c
96333index 41ff75b..5ad683a 100644
96334--- a/kernel/workqueue.c
96335+++ b/kernel/workqueue.c
96336@@ -4564,7 +4564,7 @@ static void rebind_workers(struct worker_pool *pool)
96337 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
96338 worker_flags |= WORKER_REBOUND;
96339 worker_flags &= ~WORKER_UNBOUND;
96340- ACCESS_ONCE(worker->flags) = worker_flags;
96341+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
96342 }
96343
96344 spin_unlock_irq(&pool->lock);
96345diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
96346index c5cefb3..a4241e3 100644
96347--- a/lib/Kconfig.debug
96348+++ b/lib/Kconfig.debug
96349@@ -923,7 +923,7 @@ config DEBUG_MUTEXES
96350
96351 config DEBUG_WW_MUTEX_SLOWPATH
96352 bool "Wait/wound mutex debugging: Slowpath testing"
96353- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96354+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96355 select DEBUG_LOCK_ALLOC
96356 select DEBUG_SPINLOCK
96357 select DEBUG_MUTEXES
96358@@ -940,7 +940,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
96359
96360 config DEBUG_LOCK_ALLOC
96361 bool "Lock debugging: detect incorrect freeing of live locks"
96362- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96363+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96364 select DEBUG_SPINLOCK
96365 select DEBUG_MUTEXES
96366 select LOCKDEP
96367@@ -954,7 +954,7 @@ config DEBUG_LOCK_ALLOC
96368
96369 config PROVE_LOCKING
96370 bool "Lock debugging: prove locking correctness"
96371- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96372+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96373 select LOCKDEP
96374 select DEBUG_SPINLOCK
96375 select DEBUG_MUTEXES
96376@@ -1005,7 +1005,7 @@ config LOCKDEP
96377
96378 config LOCK_STAT
96379 bool "Lock usage statistics"
96380- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
96381+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
96382 select LOCKDEP
96383 select DEBUG_SPINLOCK
96384 select DEBUG_MUTEXES
96385@@ -1467,6 +1467,7 @@ config LATENCYTOP
96386 depends on DEBUG_KERNEL
96387 depends on STACKTRACE_SUPPORT
96388 depends on PROC_FS
96389+ depends on !GRKERNSEC_HIDESYM
96390 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
96391 select KALLSYMS
96392 select KALLSYMS_ALL
96393@@ -1483,7 +1484,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96394 config DEBUG_STRICT_USER_COPY_CHECKS
96395 bool "Strict user copy size checks"
96396 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
96397- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
96398+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
96399 help
96400 Enabling this option turns a certain set of sanity checks for user
96401 copy operations into compile time failures.
96402@@ -1614,7 +1615,7 @@ endmenu # runtime tests
96403
96404 config PROVIDE_OHCI1394_DMA_INIT
96405 bool "Remote debugging over FireWire early on boot"
96406- depends on PCI && X86
96407+ depends on PCI && X86 && !GRKERNSEC
96408 help
96409 If you want to debug problems which hang or crash the kernel early
96410 on boot and the crashing machine has a FireWire port, you can use
96411diff --git a/lib/Makefile b/lib/Makefile
96412index 58f74d2..08e011f 100644
96413--- a/lib/Makefile
96414+++ b/lib/Makefile
96415@@ -59,7 +59,7 @@ obj-$(CONFIG_BTREE) += btree.o
96416 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
96417 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
96418 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
96419-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
96420+obj-y += list_debug.o
96421 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
96422
96423 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
96424diff --git a/lib/average.c b/lib/average.c
96425index 114d1be..ab0350c 100644
96426--- a/lib/average.c
96427+++ b/lib/average.c
96428@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
96429 {
96430 unsigned long internal = ACCESS_ONCE(avg->internal);
96431
96432- ACCESS_ONCE(avg->internal) = internal ?
96433+ ACCESS_ONCE_RW(avg->internal) = internal ?
96434 (((internal << avg->weight) - internal) +
96435 (val << avg->factor)) >> avg->weight :
96436 (val << avg->factor);
96437diff --git a/lib/bitmap.c b/lib/bitmap.c
96438index d456f4c1..29a0308 100644
96439--- a/lib/bitmap.c
96440+++ b/lib/bitmap.c
96441@@ -264,7 +264,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
96442 }
96443 EXPORT_SYMBOL(__bitmap_subset);
96444
96445-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
96446+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
96447 {
96448 unsigned int k, lim = bits/BITS_PER_LONG;
96449 int w = 0;
96450@@ -391,7 +391,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
96451 {
96452 int c, old_c, totaldigits, ndigits, nchunks, nbits;
96453 u32 chunk;
96454- const char __user __force *ubuf = (const char __user __force *)buf;
96455+ const char __user *ubuf = (const char __force_user *)buf;
96456
96457 bitmap_zero(maskp, nmaskbits);
96458
96459@@ -476,7 +476,7 @@ int bitmap_parse_user(const char __user *ubuf,
96460 {
96461 if (!access_ok(VERIFY_READ, ubuf, ulen))
96462 return -EFAULT;
96463- return __bitmap_parse((const char __force *)ubuf,
96464+ return __bitmap_parse((const char __force_kernel *)ubuf,
96465 ulen, 1, maskp, nmaskbits);
96466
96467 }
96468@@ -535,7 +535,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
96469 {
96470 unsigned a, b;
96471 int c, old_c, totaldigits;
96472- const char __user __force *ubuf = (const char __user __force *)buf;
96473+ const char __user *ubuf = (const char __force_user *)buf;
96474 int exp_digit, in_range;
96475
96476 totaldigits = c = 0;
96477@@ -630,7 +630,7 @@ int bitmap_parselist_user(const char __user *ubuf,
96478 {
96479 if (!access_ok(VERIFY_READ, ubuf, ulen))
96480 return -EFAULT;
96481- return __bitmap_parselist((const char __force *)ubuf,
96482+ return __bitmap_parselist((const char __force_kernel *)ubuf,
96483 ulen, 1, maskp, nmaskbits);
96484 }
96485 EXPORT_SYMBOL(bitmap_parselist_user);
96486diff --git a/lib/bug.c b/lib/bug.c
96487index 0c3bd95..5a615a1 100644
96488--- a/lib/bug.c
96489+++ b/lib/bug.c
96490@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
96491 return BUG_TRAP_TYPE_NONE;
96492
96493 bug = find_bug(bugaddr);
96494+ if (!bug)
96495+ return BUG_TRAP_TYPE_NONE;
96496
96497 file = NULL;
96498 line = 0;
96499diff --git a/lib/debugobjects.c b/lib/debugobjects.c
96500index 547f7f9..a6d4ba0 100644
96501--- a/lib/debugobjects.c
96502+++ b/lib/debugobjects.c
96503@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
96504 if (limit > 4)
96505 return;
96506
96507- is_on_stack = object_is_on_stack(addr);
96508+ is_on_stack = object_starts_on_stack(addr);
96509 if (is_on_stack == onstack)
96510 return;
96511
96512diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
96513index 6dd0335..1e9c239 100644
96514--- a/lib/decompress_bunzip2.c
96515+++ b/lib/decompress_bunzip2.c
96516@@ -665,7 +665,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
96517
96518 /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
96519 uncompressed data. Allocate intermediate buffer for block. */
96520- bd->dbufSize = 100000*(i-BZh0);
96521+ i -= BZh0;
96522+ bd->dbufSize = 100000 * i;
96523
96524 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
96525 if (!bd->dbuf)
96526diff --git a/lib/div64.c b/lib/div64.c
96527index 4382ad7..08aa558 100644
96528--- a/lib/div64.c
96529+++ b/lib/div64.c
96530@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
96531 EXPORT_SYMBOL(__div64_32);
96532
96533 #ifndef div_s64_rem
96534-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96535+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
96536 {
96537 u64 quotient;
96538
96539@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
96540 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
96541 */
96542 #ifndef div64_u64
96543-u64 div64_u64(u64 dividend, u64 divisor)
96544+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
96545 {
96546 u32 high = divisor >> 32;
96547 u64 quot;
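
__intentional_overflow(-1), applied above to div_s64_rem() and div64_u64() (and to __bitmap_weight() earlier), is an annotation consumed by the size_overflow GCC plugin: it exempts a function whose arithmetic may legitimately wrap from the plugin's instrumentation. The same plugin is why the bunzip2 hunk splits `100000*(i-BZh0)` into two statements, giving it a named intermediate to track. A plausible shape for the macro, noting that the attribute spelling under the plugin is an assumption here:

/* Plausible shape of the annotation; the exact attribute is supplied
 * by the size_overflow plugin and is an assumption in this sketch. */
#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)    /* compiles away without the plugin */
#endif

/* division helper whose intermediate math is allowed to wrap */
static unsigned long long __intentional_overflow(-1)
div_u64_sketch(unsigned long long dividend, unsigned int divisor)
{
        return dividend / divisor;
}

int main(void)
{
        printf("%llu\n", div_u64_sketch(1ULL << 40, 3));
        return 0;
}
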
96548diff --git a/lib/dma-debug.c b/lib/dma-debug.c
96549index 9722bd2..0d826f4 100644
96550--- a/lib/dma-debug.c
96551+++ b/lib/dma-debug.c
96552@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
96553
96554 void dma_debug_add_bus(struct bus_type *bus)
96555 {
96556- struct notifier_block *nb;
96557+ notifier_block_no_const *nb;
96558
96559 if (dma_debug_disabled())
96560 return;
96561@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
96562
96563 static void check_for_stack(struct device *dev, void *addr)
96564 {
96565- if (object_is_on_stack(addr))
96566+ if (object_starts_on_stack(addr))
96567 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
96568 "stack [addr=%p]\n", addr);
96569 }
96570diff --git a/lib/inflate.c b/lib/inflate.c
96571index 013a761..c28f3fc 100644
96572--- a/lib/inflate.c
96573+++ b/lib/inflate.c
96574@@ -269,7 +269,7 @@ static void free(void *where)
96575 malloc_ptr = free_mem_ptr;
96576 }
96577 #else
96578-#define malloc(a) kmalloc(a, GFP_KERNEL)
96579+#define malloc(a) kmalloc((a), GFP_KERNEL)
96580 #define free(a) kfree(a)
96581 #endif
96582
96583diff --git a/lib/ioremap.c b/lib/ioremap.c
96584index 0c9216c..863bd89 100644
96585--- a/lib/ioremap.c
96586+++ b/lib/ioremap.c
96587@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
96588 unsigned long next;
96589
96590 phys_addr -= addr;
96591- pmd = pmd_alloc(&init_mm, pud, addr);
96592+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
96593 if (!pmd)
96594 return -ENOMEM;
96595 do {
96596@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
96597 unsigned long next;
96598
96599 phys_addr -= addr;
96600- pud = pud_alloc(&init_mm, pgd, addr);
96601+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
96602 if (!pud)
96603 return -ENOMEM;
96604 do {
96605diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
96606index bd2bea9..6b3c95e 100644
96607--- a/lib/is_single_threaded.c
96608+++ b/lib/is_single_threaded.c
96609@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
96610 struct task_struct *p, *t;
96611 bool ret;
96612
96613+ if (!mm)
96614+ return true;
96615+
96616 if (atomic_read(&task->signal->live) != 1)
96617 return false;
96618
96619diff --git a/lib/kobject.c b/lib/kobject.c
96620index 03d4ab3..46f6374 100644
96621--- a/lib/kobject.c
96622+++ b/lib/kobject.c
96623@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
96624
96625
96626 static DEFINE_SPINLOCK(kobj_ns_type_lock);
96627-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
96628+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
96629
96630-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
96631+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
96632 {
96633 enum kobj_ns_type type = ops->type;
96634 int error;
96635diff --git a/lib/list_debug.c b/lib/list_debug.c
96636index c24c2f7..f0296f4 100644
96637--- a/lib/list_debug.c
96638+++ b/lib/list_debug.c
96639@@ -11,7 +11,9 @@
96640 #include <linux/bug.h>
96641 #include <linux/kernel.h>
96642 #include <linux/rculist.h>
96643+#include <linux/mm.h>
96644
96645+#ifdef CONFIG_DEBUG_LIST
96646 /*
96647 * Insert a new entry between two known consecutive entries.
96648 *
96649@@ -19,21 +21,40 @@
96650 * the prev/next entries already!
96651 */
96652
96653+static bool __list_add_debug(struct list_head *new,
96654+ struct list_head *prev,
96655+ struct list_head *next)
96656+{
96657+ if (unlikely(next->prev != prev)) {
96658+ printk(KERN_ERR "list_add corruption. next->prev should be "
96659+ "prev (%p), but was %p. (next=%p).\n",
96660+ prev, next->prev, next);
96661+ BUG();
96662+ return false;
96663+ }
96664+ if (unlikely(prev->next != next)) {
96665+ printk(KERN_ERR "list_add corruption. prev->next should be "
96666+ "next (%p), but was %p. (prev=%p).\n",
96667+ next, prev->next, prev);
96668+ BUG();
96669+ return false;
96670+ }
96671+ if (unlikely(new == prev || new == next)) {
96672+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
96673+ new, prev, next);
96674+ BUG();
96675+ return false;
96676+ }
96677+ return true;
96678+}
96679+
96680 void __list_add(struct list_head *new,
96681- struct list_head *prev,
96682- struct list_head *next)
96683+ struct list_head *prev,
96684+ struct list_head *next)
96685 {
96686- WARN(next->prev != prev,
96687- "list_add corruption. next->prev should be "
96688- "prev (%p), but was %p. (next=%p).\n",
96689- prev, next->prev, next);
96690- WARN(prev->next != next,
96691- "list_add corruption. prev->next should be "
96692- "next (%p), but was %p. (prev=%p).\n",
96693- next, prev->next, prev);
96694- WARN(new == prev || new == next,
96695- "list_add double add: new=%p, prev=%p, next=%p.\n",
96696- new, prev, next);
96697+ if (!__list_add_debug(new, prev, next))
96698+ return;
96699+
96700 next->prev = new;
96701 new->next = next;
96702 new->prev = prev;
96703@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
96704 }
96705 EXPORT_SYMBOL(__list_add);
96706
96707-void __list_del_entry(struct list_head *entry)
96708+static bool __list_del_entry_debug(struct list_head *entry)
96709 {
96710 struct list_head *prev, *next;
96711
96712 prev = entry->prev;
96713 next = entry->next;
96714
96715- if (WARN(next == LIST_POISON1,
96716- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
96717- entry, LIST_POISON1) ||
96718- WARN(prev == LIST_POISON2,
96719- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
96720- entry, LIST_POISON2) ||
96721- WARN(prev->next != entry,
96722- "list_del corruption. prev->next should be %p, "
96723- "but was %p\n", entry, prev->next) ||
96724- WARN(next->prev != entry,
96725- "list_del corruption. next->prev should be %p, "
96726- "but was %p\n", entry, next->prev))
96727+ if (unlikely(next == LIST_POISON1)) {
96728+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
96729+ entry, LIST_POISON1);
96730+ BUG();
96731+ return false;
96732+ }
96733+ if (unlikely(prev == LIST_POISON2)) {
96734+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
96735+ entry, LIST_POISON2);
96736+ BUG();
96737+ return false;
96738+ }
96739+ if (unlikely(entry->prev->next != entry)) {
96740+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
96741+ "but was %p\n", entry, prev->next);
96742+ BUG();
96743+ return false;
96744+ }
96745+ if (unlikely(entry->next->prev != entry)) {
96746+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
96747+ "but was %p\n", entry, next->prev);
96748+ BUG();
96749+ return false;
96750+ }
96751+ return true;
96752+}
96753+
96754+void __list_del_entry(struct list_head *entry)
96755+{
96756+ if (!__list_del_entry_debug(entry))
96757 return;
96758
96759- __list_del(prev, next);
96760+ __list_del(entry->prev, entry->next);
96761 }
96762 EXPORT_SYMBOL(__list_del_entry);
96763
96764@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
96765 void __list_add_rcu(struct list_head *new,
96766 struct list_head *prev, struct list_head *next)
96767 {
96768- WARN(next->prev != prev,
96769- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
96770- prev, next->prev, next);
96771- WARN(prev->next != next,
96772- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
96773- next, prev->next, prev);
96774+ if (!__list_add_debug(new, prev, next))
96775+ return;
96776+
96777 new->next = next;
96778 new->prev = prev;
96779 rcu_assign_pointer(list_next_rcu(prev), new);
96780 next->prev = new;
96781 }
96782 EXPORT_SYMBOL(__list_add_rcu);
96783+#endif
96784+
96785+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
96786+{
96787+#ifdef CONFIG_DEBUG_LIST
96788+ if (!__list_add_debug(new, prev, next))
96789+ return;
96790+#endif
96791+
96792+ pax_open_kernel();
96793+ next->prev = new;
96794+ new->next = next;
96795+ new->prev = prev;
96796+ prev->next = new;
96797+ pax_close_kernel();
96798+}
96799+EXPORT_SYMBOL(__pax_list_add);
96800+
96801+void pax_list_del(struct list_head *entry)
96802+{
96803+#ifdef CONFIG_DEBUG_LIST
96804+ if (!__list_del_entry_debug(entry))
96805+ return;
96806+#endif
96807+
96808+ pax_open_kernel();
96809+ __list_del(entry->prev, entry->next);
96810+ entry->next = LIST_POISON1;
96811+ entry->prev = LIST_POISON2;
96812+ pax_close_kernel();
96813+}
96814+EXPORT_SYMBOL(pax_list_del);
96815+
96816+void pax_list_del_init(struct list_head *entry)
96817+{
96818+ pax_open_kernel();
96819+ __list_del(entry->prev, entry->next);
96820+ INIT_LIST_HEAD(entry);
96821+ pax_close_kernel();
96822+}
96823+EXPORT_SYMBOL(pax_list_del_init);
96824+
96825+void __pax_list_add_rcu(struct list_head *new,
96826+ struct list_head *prev, struct list_head *next)
96827+{
96828+#ifdef CONFIG_DEBUG_LIST
96829+ if (!__list_add_debug(new, prev, next))
96830+ return;
96831+#endif
96832+
96833+ pax_open_kernel();
96834+ new->next = next;
96835+ new->prev = prev;
96836+ rcu_assign_pointer(list_next_rcu(prev), new);
96837+ next->prev = new;
96838+ pax_close_kernel();
96839+}
96840+EXPORT_SYMBOL(__pax_list_add_rcu);
96841+
96842+void pax_list_del_rcu(struct list_head *entry)
96843+{
96844+#ifdef CONFIG_DEBUG_LIST
96845+ if (!__list_del_entry_debug(entry))
96846+ return;
96847+#endif
96848+
96849+ pax_open_kernel();
96850+ __list_del(entry->prev, entry->next);
96851+ entry->next = LIST_POISON1;
96852+ entry->prev = LIST_POISON2;
96853+ pax_close_kernel();
96854+}
96855+EXPORT_SYMBOL(pax_list_del_rcu);
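
The list_debug.c rewrite factors the sanity checks into __list_add_debug()/__list_del_entry_debug(), upgrades them from WARN-and-proceed to BUG-and-refuse, and adds pax_list_* entry points that perform the pointer updates inside a pax_open_kernel() window for lists kept in read-only memory. A userspace model of the validate-then-link half, with assert() standing in for BUG():

/* Userspace model of __list_add_debug(): validate neighbours before
 * linking, and refuse (rather than proceed) on corruption. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static bool list_add_valid(struct list_head *new,
                           struct list_head *prev, struct list_head *next)
{
        if (next->prev != prev || prev->next != next)
                return false;   /* neighbours disagree: corruption */
        if (new == prev || new == next)
                return false;   /* double add */
        return true;
}

static void list_add_checked(struct list_head *new,
                             struct list_head *prev, struct list_head *next)
{
        assert(list_add_valid(new, prev, next)); /* stand-in for BUG() */
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

int main(void)
{
        struct list_head head = { &head, &head }, node;

        list_add_checked(&node, &head, head.next);
        printf("inserted: %d\n", head.next == &node);
        return 0;
}
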
96856diff --git a/lib/lockref.c b/lib/lockref.c
96857index ecb9a66..a044fc5 100644
96858--- a/lib/lockref.c
96859+++ b/lib/lockref.c
96860@@ -48,13 +48,13 @@
96861 void lockref_get(struct lockref *lockref)
96862 {
96863 CMPXCHG_LOOP(
96864- new.count++;
96865+ __lockref_inc(&new);
96866 ,
96867 return;
96868 );
96869
96870 spin_lock(&lockref->lock);
96871- lockref->count++;
96872+ __lockref_inc(lockref);
96873 spin_unlock(&lockref->lock);
96874 }
96875 EXPORT_SYMBOL(lockref_get);
96876@@ -69,8 +69,8 @@ int lockref_get_not_zero(struct lockref *lockref)
96877 int retval;
96878
96879 CMPXCHG_LOOP(
96880- new.count++;
96881- if (old.count <= 0)
96882+ __lockref_inc(&new);
96883+ if (__lockref_read(&old) <= 0)
96884 return 0;
96885 ,
96886 return 1;
96887@@ -78,8 +78,8 @@ int lockref_get_not_zero(struct lockref *lockref)
96888
96889 spin_lock(&lockref->lock);
96890 retval = 0;
96891- if (lockref->count > 0) {
96892- lockref->count++;
96893+ if (__lockref_read(lockref) > 0) {
96894+ __lockref_inc(lockref);
96895 retval = 1;
96896 }
96897 spin_unlock(&lockref->lock);
96898@@ -96,17 +96,17 @@ EXPORT_SYMBOL(lockref_get_not_zero);
96899 int lockref_get_or_lock(struct lockref *lockref)
96900 {
96901 CMPXCHG_LOOP(
96902- new.count++;
96903- if (old.count <= 0)
96904+ __lockref_inc(&new);
96905+ if (__lockref_read(&old) <= 0)
96906 break;
96907 ,
96908 return 1;
96909 );
96910
96911 spin_lock(&lockref->lock);
96912- if (lockref->count <= 0)
96913+ if (__lockref_read(lockref) <= 0)
96914 return 0;
96915- lockref->count++;
96916+ __lockref_inc(lockref);
96917 spin_unlock(&lockref->lock);
96918 return 1;
96919 }
96920@@ -122,11 +122,11 @@ EXPORT_SYMBOL(lockref_get_or_lock);
96921 int lockref_put_return(struct lockref *lockref)
96922 {
96923 CMPXCHG_LOOP(
96924- new.count--;
96925- if (old.count <= 0)
96926+ __lockref_dec(&new);
96927+ if (__lockref_read(&old) <= 0)
96928 return -1;
96929 ,
96930- return new.count;
96931+ return __lockref_read(&new);
96932 );
96933 return -1;
96934 }
96935@@ -140,17 +140,17 @@ EXPORT_SYMBOL(lockref_put_return);
96936 int lockref_put_or_lock(struct lockref *lockref)
96937 {
96938 CMPXCHG_LOOP(
96939- new.count--;
96940- if (old.count <= 1)
96941+ __lockref_dec(&new);
96942+ if (__lockref_read(&old) <= 1)
96943 break;
96944 ,
96945 return 1;
96946 );
96947
96948 spin_lock(&lockref->lock);
96949- if (lockref->count <= 1)
96950+ if (__lockref_read(lockref) <= 1)
96951 return 0;
96952- lockref->count--;
96953+ __lockref_dec(lockref);
96954 spin_unlock(&lockref->lock);
96955 return 1;
96956 }
96957@@ -163,7 +163,7 @@ EXPORT_SYMBOL(lockref_put_or_lock);
96958 void lockref_mark_dead(struct lockref *lockref)
96959 {
96960 assert_spin_locked(&lockref->lock);
96961- lockref->count = -128;
96962+ __lockref_set(lockref, -128);
96963 }
96964 EXPORT_SYMBOL(lockref_mark_dead);
96965
96966@@ -177,8 +177,8 @@ int lockref_get_not_dead(struct lockref *lockref)
96967 int retval;
96968
96969 CMPXCHG_LOOP(
96970- new.count++;
96971- if (old.count < 0)
96972+ __lockref_inc(&new);
96973+ if (__lockref_read(&old) < 0)
96974 return 0;
96975 ,
96976 return 1;
96977@@ -186,8 +186,8 @@ int lockref_get_not_dead(struct lockref *lockref)
96978
96979 spin_lock(&lockref->lock);
96980 retval = 0;
96981- if (lockref->count >= 0) {
96982- lockref->count++;
96983+ if (__lockref_read(lockref) >= 0) {
96984+ __lockref_inc(lockref);
96985 retval = 1;
96986 }
96987 spin_unlock(&lockref->lock);
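
The lockref.c conversion never touches the algorithm: every direct `count` access is routed through __lockref_read/__lockref_inc/__lockref_dec/__lockref_set, so the field's representation can become an overflow-checked atomic in one place without touching callers. The accessor layer in miniature, with the count simplified to a plain int and the spinlock elided:

/* Shape of the __lockref_*() accessor layer; the real count becomes an
 * overflow-checked atomic under PaX, modelled here as a plain int. */
#include <stdio.h>

struct lockref_sketch { int count; /* spinlock elided */ };

static inline int  __lockref_read(struct lockref_sketch *l) { return l->count; }
static inline void __lockref_inc(struct lockref_sketch *l)  { l->count++; }
static inline void __lockref_set(struct lockref_sketch *l, int v) { l->count = v; }

static int get_not_dead(struct lockref_sketch *l)
{
        if (__lockref_read(l) < 0)      /* -128 marks a dead lockref */
                return 0;
        __lockref_inc(l);
        return 1;
}

int main(void)
{
        struct lockref_sketch l = { .count = 1 };

        printf("get: %d, count now %d\n", get_not_dead(&l), __lockref_read(&l));
        __lockref_set(&l, -128);        /* lockref_mark_dead() */
        printf("get on dead: %d\n", get_not_dead(&l));
        return 0;
}

Centralizing access like this is what makes the whole-file conversion mechanical and reviewable: the diff is large, but each hunk is the same one-for-one substitution.
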
96988diff --git a/lib/nlattr.c b/lib/nlattr.c
96989index f5907d2..36072be 100644
96990--- a/lib/nlattr.c
96991+++ b/lib/nlattr.c
96992@@ -278,6 +278,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
96993 {
96994 int minlen = min_t(int, count, nla_len(src));
96995
96996+ BUG_ON(minlen < 0);
96997+
96998 memcpy(dest, nla_data(src), minlen);
96999 if (count > minlen)
97000 memset(dest + minlen, 0, count - minlen);
97001diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
97002index 6111bcb..02e816b 100644
97003--- a/lib/percpu-refcount.c
97004+++ b/lib/percpu-refcount.c
97005@@ -31,7 +31,7 @@
97006 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
97007 */
97008
97009-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
97010+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
97011
97012 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
97013
97014diff --git a/lib/radix-tree.c b/lib/radix-tree.c
97015index 3d2aa27..a472f20 100644
97016--- a/lib/radix-tree.c
97017+++ b/lib/radix-tree.c
97018@@ -67,7 +67,7 @@ struct radix_tree_preload {
97019 int nr;
97020 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
97021 };
97022-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
97023+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
97024
97025 static inline void *ptr_to_indirect(void *ptr)
97026 {
97027diff --git a/lib/random32.c b/lib/random32.c
97028index 0bee183..526f12f 100644
97029--- a/lib/random32.c
97030+++ b/lib/random32.c
97031@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
97032 }
97033 #endif
97034
97035-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
97036+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
97037
97038 /**
97039 * prandom_u32_state - seeded pseudo-random number generator.
97040diff --git a/lib/rbtree.c b/lib/rbtree.c
97041index c16c81a..4dcbda1 100644
97042--- a/lib/rbtree.c
97043+++ b/lib/rbtree.c
97044@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
97045 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
97046
97047 static const struct rb_augment_callbacks dummy_callbacks = {
97048- dummy_propagate, dummy_copy, dummy_rotate
97049+ .propagate = dummy_propagate,
97050+ .copy = dummy_copy,
97051+ .rotate = dummy_rotate
97052 };
97053
97054 void rb_insert_color(struct rb_node *node, struct rb_root *root)
97055diff --git a/lib/show_mem.c b/lib/show_mem.c
97056index adc98e18..0ce83c2 100644
97057--- a/lib/show_mem.c
97058+++ b/lib/show_mem.c
97059@@ -49,6 +49,6 @@ void show_mem(unsigned int filter)
97060 quicklist_total_size());
97061 #endif
97062 #ifdef CONFIG_MEMORY_FAILURE
97063- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
97064+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
97065 #endif
97066 }
97067diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
97068index e0af6ff..fcc9f15 100644
97069--- a/lib/strncpy_from_user.c
97070+++ b/lib/strncpy_from_user.c
97071@@ -22,7 +22,7 @@
97072 */
97073 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
97074 {
97075- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97076+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97077 long res = 0;
97078
97079 /*
97080diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
97081index a28df52..3d55877 100644
97082--- a/lib/strnlen_user.c
97083+++ b/lib/strnlen_user.c
97084@@ -26,7 +26,7 @@
97085 */
97086 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
97087 {
97088- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97089+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
97090 long align, res = 0;
97091 unsigned long c;
97092
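
Both word-at-a-time hunks (strncpy_from_user.c and strnlen_user.c) add `static` to the WORD_AT_A_TIME_CONSTANTS local, so the mask set lives once in .rodata instead of being rebuilt on the stack of every call. The same change in miniature, with illustrative mask values:

/* The static-const change in miniature: one .rodata copy instead of a
 * fresh stack copy per call. Mask values are the usual has-zero-byte
 * constants, shown here only for illustration. */
#include <stdio.h>

struct wat { unsigned long long low_bits, high_bits; };

static unsigned long long has_zero_sketch(unsigned long long word)
{
        static const struct wat c = {
                0x0101010101010101ULL, 0x8080808080808080ULL
        };

        return ((word - c.low_bits) & ~word) & c.high_bits;
}

int main(void)
{
        printf("zero byte mask: %llx\n", has_zero_sketch(0x6161006161616161ULL));
        return 0;
}
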
97093diff --git a/lib/swiotlb.c b/lib/swiotlb.c
97094index 4abda07..b9d3765 100644
97095--- a/lib/swiotlb.c
97096+++ b/lib/swiotlb.c
97097@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
97098
97099 void
97100 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
97101- dma_addr_t dev_addr)
97102+ dma_addr_t dev_addr, struct dma_attrs *attrs)
97103 {
97104 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
97105
97106diff --git a/lib/usercopy.c b/lib/usercopy.c
97107index 4f5b1dd..7cab418 100644
97108--- a/lib/usercopy.c
97109+++ b/lib/usercopy.c
97110@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
97111 WARN(1, "Buffer overflow detected!\n");
97112 }
97113 EXPORT_SYMBOL(copy_from_user_overflow);
97114+
97115+void copy_to_user_overflow(void)
97116+{
97117+ WARN(1, "Buffer overflow detected!\n");
97118+}
97119+EXPORT_SYMBOL(copy_to_user_overflow);
97120diff --git a/lib/vsprintf.c b/lib/vsprintf.c
97121index b235c96..343ffc1 100644
97122--- a/lib/vsprintf.c
97123+++ b/lib/vsprintf.c
97124@@ -16,6 +16,9 @@
97125 * - scnprintf and vscnprintf
97126 */
97127
97128+#ifdef CONFIG_GRKERNSEC_HIDESYM
97129+#define __INCLUDED_BY_HIDESYM 1
97130+#endif
97131 #include <stdarg.h>
97132 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
97133 #include <linux/types.h>
97134@@ -626,7 +629,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
97135 #ifdef CONFIG_KALLSYMS
97136 if (*fmt == 'B')
97137 sprint_backtrace(sym, value);
97138- else if (*fmt != 'f' && *fmt != 's')
97139+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
97140 sprint_symbol(sym, value);
97141 else
97142 sprint_symbol_no_offset(sym, value);
97143@@ -1322,7 +1325,11 @@ char *address_val(char *buf, char *end, const void *addr,
97144 return number(buf, end, num, spec);
97145 }
97146
97147+#ifdef CONFIG_GRKERNSEC_HIDESYM
97148+int kptr_restrict __read_mostly = 2;
97149+#else
97150 int kptr_restrict __read_mostly;
97151+#endif
97152
97153 /*
97154 * Show a '%p' thing. A kernel extension is that the '%p' is followed
97155@@ -1333,8 +1340,10 @@ int kptr_restrict __read_mostly;
97156 *
97157 * - 'F' For symbolic function descriptor pointers with offset
97158 * - 'f' For simple symbolic function names without offset
97159+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
97160 * - 'S' For symbolic direct pointers with offset
97161 * - 's' For symbolic direct pointers without offset
97162+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
97163 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
97164 * - 'B' For backtraced symbolic direct pointers with offset
97165 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
97166@@ -1417,12 +1426,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
97167
97168 if (!ptr && *fmt != 'K') {
97169 /*
97170- * Print (null) with the same width as a pointer so it makes
97171+ * Print (nil) with the same width as a pointer so it makes
97172 * tabular output look nice.
97173 */
97174 if (spec.field_width == -1)
97175 spec.field_width = default_width;
97176- return string(buf, end, "(null)", spec);
97177+ return string(buf, end, "(nil)", spec);
97178 }
97179
97180 switch (*fmt) {
97181@@ -1432,6 +1441,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
97182 /* Fallthrough */
97183 case 'S':
97184 case 's':
97185+#ifdef CONFIG_GRKERNSEC_HIDESYM
97186+ break;
97187+#else
97188+ return symbol_string(buf, end, ptr, spec, fmt);
97189+#endif
97190+ case 'X':
97191+ ptr = dereference_function_descriptor(ptr);
97192+ case 'A':
97193 case 'B':
97194 return symbol_string(buf, end, ptr, spec, fmt);
97195 case 'R':
97196@@ -1496,6 +1513,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
97197 va_end(va);
97198 return buf;
97199 }
97200+ case 'P':
97201+ break;
97202 case 'K':
97203 /*
97204 * %pK cannot be used in IRQ context because its test
97205@@ -1553,6 +1572,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
97206 ((const struct file *)ptr)->f_path.dentry,
97207 spec, fmt);
97208 }
97209+
97210+#ifdef CONFIG_GRKERNSEC_HIDESYM
97211+ /* 'P' = approved pointers to copy to userland,
97212+ as in the /proc/kallsyms case, as we make it display nothing
97213+ for non-root users, and the real contents for root users
97214+ 'X' = approved simple symbols
97215+ Also ignore 'K' pointers, since we force their NULLing for non-root users
97216+ above
97217+ */
97218+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
97219+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
97220+ dump_stack();
97221+ ptr = NULL;
97222+ }
97223+#endif
97224+
97225 spec.flags |= SMALL;
97226 if (spec.field_width == -1) {
97227 spec.field_width = default_width;
97228@@ -2254,11 +2289,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
97229 typeof(type) value; \
97230 if (sizeof(type) == 8) { \
97231 args = PTR_ALIGN(args, sizeof(u32)); \
97232- *(u32 *)&value = *(u32 *)args; \
97233- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
97234+ *(u32 *)&value = *(const u32 *)args; \
97235+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
97236 } else { \
97237 args = PTR_ALIGN(args, sizeof(type)); \
97238- value = *(typeof(type) *)args; \
97239+ value = *(const typeof(type) *)args; \
97240 } \
97241 args += sizeof(type); \
97242 value; \
97243@@ -2321,7 +2356,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
97244 case FORMAT_TYPE_STR: {
97245 const char *str_arg = args;
97246 args += strlen(str_arg) + 1;
97247- str = string(str, end, (char *)str_arg, spec);
97248+ str = string(str, end, str_arg, spec);
97249 break;
97250 }
97251
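
The vsprintf.c changes introduce 'X' and 'A' as HIDESYM-approved symbol formats, default kptr_restrict to 2, and, under GRKERNSEC_HIDESYM, scrub any other kernel pointer about to be formatted into a user-visible buffer. The essential idea is a policy filter at the single choke point all %p output passes through; a sketch with an abbreviated approved set and a crude user/kernel boundary:

/* Sketch of a choke-point pointer filter: unless the format is on the
 * approved list, a kernel pointer headed for userland is nulled and
 * the leak is reported. The boundary and the approved set are
 * simplified stand-ins. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KERNEL_BOUNDARY (~(uintptr_t)0 >> 1)    /* crude user/kernel split */

static const void *filter_ptr(const void *ptr, char fmt)
{
        if ((uintptr_t)ptr > KERNEL_BOUNDARY && !strchr("PXK", fmt)) {
                fprintf(stderr, "blocked %%p%c leak of a kernel pointer\n", fmt);
                return NULL;
        }
        return ptr;
}

int main(void)
{
        const void *kernel_ish = (const void *)(~(uintptr_t)0 - 0xfff);

        printf("%p\n", filter_ptr(kernel_ish, 's'));    /* scrubbed: (nil) */
        printf("%p\n", filter_ptr(kernel_ish, 'X'));    /* approved: printed */
        return 0;
}
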
97252diff --git a/localversion-grsec b/localversion-grsec
97253new file mode 100644
97254index 0000000..7cd6065
97255--- /dev/null
97256+++ b/localversion-grsec
97257@@ -0,0 +1 @@
97258+-grsec
97259diff --git a/mm/Kconfig b/mm/Kconfig
97260index a03131b..1b1bafb 100644
97261--- a/mm/Kconfig
97262+++ b/mm/Kconfig
97263@@ -342,10 +342,11 @@ config KSM
97264 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
97265
97266 config DEFAULT_MMAP_MIN_ADDR
97267- int "Low address space to protect from user allocation"
97268+ int "Low address space to protect from user allocation"
97269 depends on MMU
97270- default 4096
97271- help
97272+ default 32768 if ALPHA || ARM || PARISC || SPARC32
97273+ default 65536
97274+ help
97275 This is the portion of low virtual memory which should be protected
97276 from userspace allocation. Keeping a user from writing to low pages
97277 can help reduce the impact of kernel NULL pointer bugs.
97278@@ -376,7 +377,7 @@ config MEMORY_FAILURE
97279
97280 config HWPOISON_INJECT
97281 tristate "HWPoison pages injector"
97282- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
97283+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
97284 select PROC_PAGE_MONITOR
97285
97286 config NOMMU_INITIAL_TRIM_EXCESS
97287diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
97288index 957d3da..1d34e20 100644
97289--- a/mm/Kconfig.debug
97290+++ b/mm/Kconfig.debug
97291@@ -10,6 +10,7 @@ config PAGE_EXTENSION
97292 config DEBUG_PAGEALLOC
97293 bool "Debug page memory allocations"
97294 depends on DEBUG_KERNEL
97295+ depends on !PAX_MEMORY_SANITIZE
97296 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
97297 depends on !KMEMCHECK
97298 select PAGE_EXTENSION
97299diff --git a/mm/backing-dev.c b/mm/backing-dev.c
97300index 6dc4580..e031ec1 100644
97301--- a/mm/backing-dev.c
97302+++ b/mm/backing-dev.c
97303@@ -12,7 +12,7 @@
97304 #include <linux/device.h>
97305 #include <trace/events/writeback.h>
97306
97307-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
97308+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
97309
97310 struct backing_dev_info noop_backing_dev_info = {
97311 .name = "noop",
97312@@ -474,7 +474,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
97313 return err;
97314
97315 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
97316- atomic_long_inc_return(&bdi_seq));
97317+ atomic_long_inc_return_unchecked(&bdi_seq));
97318 if (err) {
97319 bdi_destroy(bdi);
97320 return err;
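
The bdi_seq conversion above is an instance of PaX's REFCOUNT split: under CONFIG_PAX_REFCOUNT the plain atomic_long_t operations trap on signed overflow, so counters that may legitimately wrap, like this sequence number, are moved to *_unchecked variants that keep the old wrapping semantics. The analogy below is a hedged sketch using a compiler builtin, not the PaX mechanism (which instruments the atomic asm itself):

/* Hedged analogy using a GCC/Clang builtin, not the PaX mechanism:
 * a "checked" increment that refuses to wrap, versus the unchecked
 * increment that counters like bdi_seq keep using. */
#include <stdio.h>

static long checked_inc(long *v)
{
	long out;
	if (__builtin_add_overflow(*v, 1L, &out)) {
		fprintf(stderr, "counter overflow detected\n");
		return *v;	/* saturate instead of wrapping */
	}
	return *v = out;
}

static long unchecked_inc(long *v)
{
	/* wraparound is acceptable here (formally undefined in C for
	 * signed types; the kernel's asm simply wraps) */
	return ++*v;
}
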
97321diff --git a/mm/filemap.c b/mm/filemap.c
97322index ad72420..0a20ef2 100644
97323--- a/mm/filemap.c
97324+++ b/mm/filemap.c
97325@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
97326 struct address_space *mapping = file->f_mapping;
97327
97328 if (!mapping->a_ops->readpage)
97329- return -ENOEXEC;
97330+ return -ENODEV;
97331 file_accessed(file);
97332 vma->vm_ops = &generic_file_vm_ops;
97333 return 0;
97334@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
97335 *pos = i_size_read(inode);
97336
97337 if (limit != RLIM_INFINITY) {
97338+	gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
97339 if (*pos >= limit) {
97340 send_sig(SIGXFSZ, current, 0);
97341 return -EFBIG;
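
For the RLIMIT_FSIZE path above (gr_learn_resource() is grsecurity's learning-mode hook, here recording the attempted file position), the surrounding upstream behaviour can be demonstrated from userspace: a write that starts at or beyond the limit raises SIGXFSZ, or fails with EFBIG if the signal is ignored. A runnable demo:

/* Runnable demo: the second write starts at the RLIMIT_FSIZE boundary
 * and fails with EFBIG once SIGXFSZ is ignored. */
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	static char buf[4096];
	int fd;

	signal(SIGXFSZ, SIG_IGN);	/* turn the signal into EFBIG */
	setrlimit(RLIMIT_FSIZE, &rl);
	fd = open("/tmp/fsize-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	if (fd < 0)
		return 1;
	if (write(fd, buf, sizeof(buf)) == sizeof(buf) &&
	    write(fd, buf, 1) < 0)
		printf("write past the limit failed: %s\n", strerror(errno));
	close(fd);
	return 0;
}
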
97342diff --git a/mm/gup.c b/mm/gup.c
97343index a6e24e2..72dd2cf 100644
97344--- a/mm/gup.c
97345+++ b/mm/gup.c
97346@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
97347 unsigned int fault_flags = 0;
97348 int ret;
97349
97350- /* For mlock, just skip the stack guard page. */
97351- if ((*flags & FOLL_MLOCK) &&
97352- (stack_guard_page_start(vma, address) ||
97353- stack_guard_page_end(vma, address + PAGE_SIZE)))
97354- return -ENOENT;
97355 if (*flags & FOLL_WRITE)
97356 fault_flags |= FAULT_FLAG_WRITE;
97357 if (nonblocking)
97358@@ -435,14 +430,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
97359 if (!(gup_flags & FOLL_FORCE))
97360 gup_flags |= FOLL_NUMA;
97361
97362- do {
97363+ while (nr_pages) {
97364 struct page *page;
97365 unsigned int foll_flags = gup_flags;
97366 unsigned int page_increm;
97367
97368 /* first iteration or cross vma bound */
97369 if (!vma || start >= vma->vm_end) {
97370- vma = find_extend_vma(mm, start);
97371+ vma = find_vma(mm, start);
97372 if (!vma && in_gate_area(mm, start)) {
97373 int ret;
97374 ret = get_gate_page(mm, start & PAGE_MASK,
97375@@ -454,7 +449,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
97376 goto next_page;
97377 }
97378
97379- if (!vma || check_vma_flags(vma, gup_flags))
97380+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
97381 return i ? : -EFAULT;
97382 if (is_vm_hugetlb_page(vma)) {
97383 i = follow_hugetlb_page(mm, vma, pages, vmas,
97384@@ -509,7 +504,7 @@ next_page:
97385 i += page_increm;
97386 start += page_increm * PAGE_SIZE;
97387 nr_pages -= page_increm;
97388- } while (nr_pages);
97389+ }
97390 return i;
97391 }
97392 EXPORT_SYMBOL(__get_user_pages);
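
The gup hunks above make two related changes: find_extend_vma() is replaced by find_vma() plus an explicit `start < vma->vm_start` rejection, so get_user_pages() no longer implicitly grows a VM_GROWSDOWN stack vma, and the do/while becomes a plain while so a zero nr_pages does no work. A simplified (non-kernel) sketch of the lookup difference:

/* Simplified sketch, not kernel code: find_extend_vma() may grow a
 * VM_GROWSDOWN vma down to cover 'addr'; the replacement is a plain
 * lookup that rejects addresses in the gap instead of expanding. */
#include <stddef.h>

struct vma_sketch { unsigned long vm_start, vm_end; };

static struct vma_sketch *lookup_no_extend(struct vma_sketch *vma,
					   unsigned long addr)
{
	/* vma is the first area with vm_end > addr, as find_vma() returns */
	if (vma && addr < vma->vm_start)
		return NULL;	/* gap below the vma: reject, don't expand */
	return vma;
}
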
97393diff --git a/mm/highmem.c b/mm/highmem.c
97394index 123bcd3..0de52ba 100644
97395--- a/mm/highmem.c
97396+++ b/mm/highmem.c
97397@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
97398 * So no dangers, even with speculative execution.
97399 */
97400 page = pte_page(pkmap_page_table[i]);
97401+ pax_open_kernel();
97402 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
97403-
97404+ pax_close_kernel();
97405 set_page_address(page, NULL);
97406 need_flush = 1;
97407 }
97408@@ -259,9 +260,11 @@ start:
97409 }
97410 }
97411 vaddr = PKMAP_ADDR(last_pkmap_nr);
97412+
97413+ pax_open_kernel();
97414 set_pte_at(&init_mm, vaddr,
97415 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
97416-
97417+ pax_close_kernel();
97418 pkmap_count[last_pkmap_nr] = 1;
97419 set_page_address(page, (void *)vaddr);
97420
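
pax_open_kernel()/pax_close_kernel() bracket writes to kernel page tables that KERNEXEC keeps read-only. As a hedged sketch: one way this is commonly implemented on x86 is by clearing CR0.WP around the write; the real PaX helpers differ per architecture and configuration, so this is illustrative kernel-context pseudocode, not a drop-in definition.

/* Illustrative kernel-context pseudocode; the real PaX helpers differ
 * per architecture and configuration. */
#include <linux/preempt.h>
#include <asm/special_insns.h>		/* read_cr0()/write_cr0() */
#include <asm/processor-flags.h>	/* X86_CR0_WP */

static inline void sketch_open_kernel(void)
{
	preempt_disable();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* permit writes to RO pages */
}

static inline void sketch_close_kernel(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);	/* restore write protection */
	preempt_enable();
}
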
97421diff --git a/mm/hugetlb.c b/mm/hugetlb.c
97422index caad3c5..4f68807 100644
97423--- a/mm/hugetlb.c
97424+++ b/mm/hugetlb.c
97425@@ -2260,6 +2260,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
97426 struct ctl_table *table, int write,
97427 void __user *buffer, size_t *length, loff_t *ppos)
97428 {
97429+ ctl_table_no_const t;
97430 struct hstate *h = &default_hstate;
97431 unsigned long tmp = h->max_huge_pages;
97432 int ret;
97433@@ -2267,9 +2268,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
97434 if (!hugepages_supported())
97435 return -ENOTSUPP;
97436
97437- table->data = &tmp;
97438- table->maxlen = sizeof(unsigned long);
97439- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
97440+ t = *table;
97441+ t.data = &tmp;
97442+ t.maxlen = sizeof(unsigned long);
97443+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
97444 if (ret)
97445 goto out;
97446
97447@@ -2304,6 +2306,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
97448 struct hstate *h = &default_hstate;
97449 unsigned long tmp;
97450 int ret;
97451+ ctl_table_no_const hugetlb_table;
97452
97453 if (!hugepages_supported())
97454 return -ENOTSUPP;
97455@@ -2313,9 +2316,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
97456 if (write && hstate_is_gigantic(h))
97457 return -EINVAL;
97458
97459- table->data = &tmp;
97460- table->maxlen = sizeof(unsigned long);
97461- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
97462+ hugetlb_table = *table;
97463+ hugetlb_table.data = &tmp;
97464+ hugetlb_table.maxlen = sizeof(unsigned long);
97465+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
97466 if (ret)
97467 goto out;
97468
97469@@ -2800,6 +2804,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
97470 i_mmap_unlock_write(mapping);
97471 }
97472
97473+#ifdef CONFIG_PAX_SEGMEXEC
97474+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
97475+{
97476+ struct mm_struct *mm = vma->vm_mm;
97477+ struct vm_area_struct *vma_m;
97478+ unsigned long address_m;
97479+ pte_t *ptep_m;
97480+
97481+ vma_m = pax_find_mirror_vma(vma);
97482+ if (!vma_m)
97483+ return;
97484+
97485+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97486+ address_m = address + SEGMEXEC_TASK_SIZE;
97487+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
97488+ get_page(page_m);
97489+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
97490+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
97491+}
97492+#endif
97493+
97494 /*
97495 * Hugetlb_cow() should be called with page lock of the original hugepage held.
97496 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
97497@@ -2912,6 +2937,11 @@ retry_avoidcopy:
97498 make_huge_pte(vma, new_page, 1));
97499 page_remove_rmap(old_page);
97500 hugepage_add_new_anon_rmap(new_page, vma, address);
97501+
97502+#ifdef CONFIG_PAX_SEGMEXEC
97503+ pax_mirror_huge_pte(vma, address, new_page);
97504+#endif
97505+
97506 /* Make the old page be freed below */
97507 new_page = old_page;
97508 }
97509@@ -3072,6 +3102,10 @@ retry:
97510 && (vma->vm_flags & VM_SHARED)));
97511 set_huge_pte_at(mm, address, ptep, new_pte);
97512
97513+#ifdef CONFIG_PAX_SEGMEXEC
97514+ pax_mirror_huge_pte(vma, address, page);
97515+#endif
97516+
97517 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
97518 /* Optimization, do the COW without a second fault */
97519 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
97520@@ -3139,6 +3173,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97521 struct address_space *mapping;
97522 int need_wait_lock = 0;
97523
97524+#ifdef CONFIG_PAX_SEGMEXEC
97525+ struct vm_area_struct *vma_m;
97526+#endif
97527+
97528 address &= huge_page_mask(h);
97529
97530 ptep = huge_pte_offset(mm, address);
97531@@ -3152,6 +3190,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97532 VM_FAULT_SET_HINDEX(hstate_index(h));
97533 }
97534
97535+#ifdef CONFIG_PAX_SEGMEXEC
97536+ vma_m = pax_find_mirror_vma(vma);
97537+ if (vma_m) {
97538+ unsigned long address_m;
97539+
97540+ if (vma->vm_start > vma_m->vm_start) {
97541+ address_m = address;
97542+ address -= SEGMEXEC_TASK_SIZE;
97543+ vma = vma_m;
97544+ h = hstate_vma(vma);
97545+ } else
97546+ address_m = address + SEGMEXEC_TASK_SIZE;
97547+
97548+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
97549+ return VM_FAULT_OOM;
97550+ address_m &= HPAGE_MASK;
97551+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
97552+ }
97553+#endif
97554+
97555 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
97556 if (!ptep)
97557 return VM_FAULT_OOM;
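
The two sysctl-handler hunks above show the recurring grsecurity pattern behind ctl_table_no_const (a non-const typedef of struct ctl_table): rather than writing ->data/->maxlen through the now const-qualified global table, the handler copies it to the stack and mutates the copy. A sketch of the pattern, with current_value as a hypothetical placeholder for whatever the sysctl exposes:

/* Sketch of the copy-to-local pattern; ctl_table_no_const is
 * grsecurity's non-const typedef and current_value is a hypothetical
 * placeholder. */
#include <linux/sysctl.h>

static unsigned long current_value;

static int example_sysctl_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	ctl_table_no_const t = *table;	/* mutable stack copy */
	unsigned long tmp = current_value;
	int ret;

	t.data = &tmp;
	t.maxlen = sizeof(tmp);
	ret = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (!ret && write)
		current_value = tmp;	/* propagate only on success */
	return ret;
}
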
97558diff --git a/mm/internal.h b/mm/internal.h
97559index a96da5b..42ebd54 100644
97560--- a/mm/internal.h
97561+++ b/mm/internal.h
97562@@ -156,6 +156,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
97563
97564 extern int __isolate_free_page(struct page *page, unsigned int order);
97565 extern void __free_pages_bootmem(struct page *page, unsigned int order);
97566+extern void free_compound_page(struct page *page);
97567 extern void prep_compound_page(struct page *page, unsigned long order);
97568 #ifdef CONFIG_MEMORY_FAILURE
97569 extern bool is_free_buddy_page(struct page *page);
97570@@ -411,7 +412,7 @@ extern u32 hwpoison_filter_enable;
97571
97572 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
97573 unsigned long, unsigned long,
97574- unsigned long, unsigned long);
97575+ unsigned long, unsigned long) __intentional_overflow(-1);
97576
97577 extern void set_pageblock_order(void);
97578 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
97579diff --git a/mm/kmemleak.c b/mm/kmemleak.c
97580index 5405aff..483406d 100644
97581--- a/mm/kmemleak.c
97582+++ b/mm/kmemleak.c
97583@@ -365,7 +365,7 @@ static void print_unreferenced(struct seq_file *seq,
97584
97585 for (i = 0; i < object->trace_len; i++) {
97586 void *ptr = (void *)object->trace[i];
97587- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
97588+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
97589 }
97590 }
97591
97592@@ -1911,7 +1911,7 @@ static int __init kmemleak_late_init(void)
97593 return -ENOMEM;
97594 }
97595
97596- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
97597+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
97598 &kmemleak_fops);
97599 if (!dentry)
97600 pr_warning("Failed to create the debugfs kmemleak file\n");
97601diff --git a/mm/maccess.c b/mm/maccess.c
97602index d53adf9..03a24bf 100644
97603--- a/mm/maccess.c
97604+++ b/mm/maccess.c
97605@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
97606 set_fs(KERNEL_DS);
97607 pagefault_disable();
97608 ret = __copy_from_user_inatomic(dst,
97609- (__force const void __user *)src, size);
97610+ (const void __force_user *)src, size);
97611 pagefault_enable();
97612 set_fs(old_fs);
97613
97614@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
97615
97616 set_fs(KERNEL_DS);
97617 pagefault_disable();
97618- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
97619+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
97620 pagefault_enable();
97621 set_fs(old_fs);
97622
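
__force_user in the maccess hunks is grsecurity's combined sparse annotation, used so the cast override and the __user address-space attribute travel together and stay visible to the checker. A hedged sketch of what such annotations typically expand to (the exact definitions in grsecurity's compiler.h may differ):

/* Hedged sketch of what such annotations typically expand to; the
 * exact definitions in grsecurity's compiler.h may differ. */
#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif
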
97623diff --git a/mm/madvise.c b/mm/madvise.c
97624index d551475..8fdd7f3 100644
97625--- a/mm/madvise.c
97626+++ b/mm/madvise.c
97627@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
97628 pgoff_t pgoff;
97629 unsigned long new_flags = vma->vm_flags;
97630
97631+#ifdef CONFIG_PAX_SEGMEXEC
97632+ struct vm_area_struct *vma_m;
97633+#endif
97634+
97635 switch (behavior) {
97636 case MADV_NORMAL:
97637 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
97638@@ -126,6 +130,13 @@ success:
97639 /*
97640 * vm_flags is protected by the mmap_sem held in write mode.
97641 */
97642+
97643+#ifdef CONFIG_PAX_SEGMEXEC
97644+ vma_m = pax_find_mirror_vma(vma);
97645+ if (vma_m)
97646+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
97647+#endif
97648+
97649 vma->vm_flags = new_flags;
97650
97651 out:
97652@@ -277,11 +288,27 @@ static long madvise_dontneed(struct vm_area_struct *vma,
97653 struct vm_area_struct **prev,
97654 unsigned long start, unsigned long end)
97655 {
97656+
97657+#ifdef CONFIG_PAX_SEGMEXEC
97658+ struct vm_area_struct *vma_m;
97659+#endif
97660+
97661 *prev = vma;
97662 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
97663 return -EINVAL;
97664
97665 zap_page_range(vma, start, end - start, NULL);
97666+
97667+#ifdef CONFIG_PAX_SEGMEXEC
97668+ vma_m = pax_find_mirror_vma(vma);
97669+ if (vma_m) {
97670+ if (vma_m->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
97671+ return -EINVAL;
97672+
97673+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
97674+ }
97675+#endif
97676+
97677 return 0;
97678 }
97679
97680@@ -484,6 +511,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
97681 if (end < start)
97682 return error;
97683
97684+#ifdef CONFIG_PAX_SEGMEXEC
97685+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
97686+ if (end > SEGMEXEC_TASK_SIZE)
97687+ return error;
97688+ } else
97689+#endif
97690+
97691+ if (end > TASK_SIZE)
97692+ return error;
97693+
97694 error = 0;
97695 if (end == start)
97696 return error;
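
The madvise hunks add two SEGMEXEC concerns: the mirror vma (if any) must receive the same flag changes and zapping as the primary, and the requested range must be validated against the halved address space. A sketch of the range check, with illustrative constants (on i386, SEGMEXEC_TASK_SIZE is half of the 3GB TASK_SIZE):

/* Sketch of the added range check; constants are illustrative and
 * match the i386 3GB/1.5GB split. */
#define TASK_SIZE_SKETCH	0xC0000000UL
#define SEGMEXEC_TASK_SIZE_SKETCH (TASK_SIZE_SKETCH / 2)

static int range_ok(unsigned long end, int segmexec)
{
	unsigned long limit = segmexec ? SEGMEXEC_TASK_SIZE_SKETCH
				       : TASK_SIZE_SKETCH;
	return end <= limit;
}
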
97697diff --git a/mm/memory-failure.c b/mm/memory-failure.c
97698index 72a5224..51ba846 100644
97699--- a/mm/memory-failure.c
97700+++ b/mm/memory-failure.c
97701@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
97702
97703 int sysctl_memory_failure_recovery __read_mostly = 1;
97704
97705-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
97706+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
97707
97708 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
97709
97710@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
97711 pfn, t->comm, t->pid);
97712 si.si_signo = SIGBUS;
97713 si.si_errno = 0;
97714- si.si_addr = (void *)addr;
97715+ si.si_addr = (void __user *)addr;
97716 #ifdef __ARCH_SI_TRAPNO
97717 si.si_trapno = trapno;
97718 #endif
97719@@ -779,7 +779,7 @@ static struct page_state {
97720 unsigned long res;
97721 char *msg;
97722 int (*action)(struct page *p, unsigned long pfn);
97723-} error_states[] = {
97724+} __do_const error_states[] = {
97725 { reserved, reserved, "reserved kernel", me_kernel },
97726 /*
97727 * free pages are specially detected outside this table:
97728@@ -1087,7 +1087,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
97729 nr_pages = 1 << compound_order(hpage);
97730 else /* normal page or thp */
97731 nr_pages = 1;
97732- atomic_long_add(nr_pages, &num_poisoned_pages);
97733+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
97734
97735 /*
97736 * We need/can do nothing about count=0 pages.
97737@@ -1116,7 +1116,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
97738 if (PageHWPoison(hpage)) {
97739 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
97740 || (p != hpage && TestSetPageHWPoison(hpage))) {
97741- atomic_long_sub(nr_pages, &num_poisoned_pages);
97742+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
97743 unlock_page(hpage);
97744 return 0;
97745 }
97746@@ -1184,14 +1184,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
97747 */
97748 if (!PageHWPoison(p)) {
97749 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
97750- atomic_long_sub(nr_pages, &num_poisoned_pages);
97751+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
97752 put_page(hpage);
97753 res = 0;
97754 goto out;
97755 }
97756 if (hwpoison_filter(p)) {
97757 if (TestClearPageHWPoison(p))
97758- atomic_long_sub(nr_pages, &num_poisoned_pages);
97759+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
97760 unlock_page(hpage);
97761 put_page(hpage);
97762 return 0;
97763@@ -1421,7 +1421,7 @@ int unpoison_memory(unsigned long pfn)
97764 return 0;
97765 }
97766 if (TestClearPageHWPoison(p))
97767- atomic_long_dec(&num_poisoned_pages);
97768+ atomic_long_dec_unchecked(&num_poisoned_pages);
97769 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
97770 return 0;
97771 }
97772@@ -1435,7 +1435,7 @@ int unpoison_memory(unsigned long pfn)
97773 */
97774 if (TestClearPageHWPoison(page)) {
97775 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
97776- atomic_long_sub(nr_pages, &num_poisoned_pages);
97777+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
97778 freeit = 1;
97779 if (PageHuge(page))
97780 clear_page_hwpoison_huge_page(page);
97781@@ -1560,11 +1560,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
97782 if (PageHuge(page)) {
97783 set_page_hwpoison_huge_page(hpage);
97784 dequeue_hwpoisoned_huge_page(hpage);
97785- atomic_long_add(1 << compound_order(hpage),
97786+ atomic_long_add_unchecked(1 << compound_order(hpage),
97787 &num_poisoned_pages);
97788 } else {
97789 SetPageHWPoison(page);
97790- atomic_long_inc(&num_poisoned_pages);
97791+ atomic_long_inc_unchecked(&num_poisoned_pages);
97792 }
97793 }
97794 return ret;
97795@@ -1603,7 +1603,7 @@ static int __soft_offline_page(struct page *page, int flags)
97796 put_page(page);
97797 pr_info("soft_offline: %#lx: invalidated\n", pfn);
97798 SetPageHWPoison(page);
97799- atomic_long_inc(&num_poisoned_pages);
97800+ atomic_long_inc_unchecked(&num_poisoned_pages);
97801 return 0;
97802 }
97803
97804@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags)
97805 if (!is_free_buddy_page(page))
97806 pr_info("soft offline: %#lx: page leaked\n",
97807 pfn);
97808- atomic_long_inc(&num_poisoned_pages);
97809+ atomic_long_inc_unchecked(&num_poisoned_pages);
97810 }
97811 } else {
97812 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
97813@@ -1722,11 +1722,11 @@ int soft_offline_page(struct page *page, int flags)
97814 if (PageHuge(page)) {
97815 set_page_hwpoison_huge_page(hpage);
97816 if (!dequeue_hwpoisoned_huge_page(hpage))
97817- atomic_long_add(1 << compound_order(hpage),
97818+ atomic_long_add_unchecked(1 << compound_order(hpage),
97819 &num_poisoned_pages);
97820 } else {
97821 if (!TestSetPageHWPoison(page))
97822- atomic_long_inc(&num_poisoned_pages);
97823+ atomic_long_inc_unchecked(&num_poisoned_pages);
97824 }
97825 }
97826 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
97827diff --git a/mm/memory.c b/mm/memory.c
97828index 97839f5..4bc5530 100644
97829--- a/mm/memory.c
97830+++ b/mm/memory.c
97831@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
97832 free_pte_range(tlb, pmd, addr);
97833 } while (pmd++, addr = next, addr != end);
97834
97835+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
97836 start &= PUD_MASK;
97837 if (start < floor)
97838 return;
97839@@ -429,6 +430,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
97840 pud_clear(pud);
97841 pmd_free_tlb(tlb, pmd, start);
97842 mm_dec_nr_pmds(tlb->mm);
97843+#endif
97844 }
97845
97846 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
97847@@ -448,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
97848 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
97849 } while (pud++, addr = next, addr != end);
97850
97851+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
97852 start &= PGDIR_MASK;
97853 if (start < floor)
97854 return;
97855@@ -462,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
97856 pud = pud_offset(pgd, start);
97857 pgd_clear(pgd);
97858 pud_free_tlb(tlb, pud, start);
97859+#endif
97860+
97861 }
97862
97863 /*
97864@@ -691,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
97865 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
97866 */
97867 if (vma->vm_ops)
97868- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
97869+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
97870 vma->vm_ops->fault);
97871 if (vma->vm_file)
97872- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
97873+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
97874 vma->vm_file->f_op->mmap);
97875 dump_stack();
97876 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
97877@@ -1464,6 +1469,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
97878 page_add_file_rmap(page);
97879 set_pte_at(mm, addr, pte, mk_pte(page, prot));
97880
97881+#ifdef CONFIG_PAX_SEGMEXEC
97882+ pax_mirror_file_pte(vma, addr, page, ptl);
97883+#endif
97884+
97885 retval = 0;
97886 pte_unmap_unlock(pte, ptl);
97887 return retval;
97888@@ -1508,9 +1517,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
97889 if (!page_count(page))
97890 return -EINVAL;
97891 if (!(vma->vm_flags & VM_MIXEDMAP)) {
97892+
97893+#ifdef CONFIG_PAX_SEGMEXEC
97894+ struct vm_area_struct *vma_m;
97895+#endif
97896+
97897 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
97898 BUG_ON(vma->vm_flags & VM_PFNMAP);
97899 vma->vm_flags |= VM_MIXEDMAP;
97900+
97901+#ifdef CONFIG_PAX_SEGMEXEC
97902+ vma_m = pax_find_mirror_vma(vma);
97903+ if (vma_m)
97904+ vma_m->vm_flags |= VM_MIXEDMAP;
97905+#endif
97906+
97907 }
97908 return insert_page(vma, addr, page, vma->vm_page_prot);
97909 }
97910@@ -1593,6 +1614,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
97911 unsigned long pfn)
97912 {
97913 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
97914+ BUG_ON(vma->vm_mirror);
97915
97916 if (addr < vma->vm_start || addr >= vma->vm_end)
97917 return -EFAULT;
97918@@ -1840,7 +1862,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
97919
97920 BUG_ON(pud_huge(*pud));
97921
97922- pmd = pmd_alloc(mm, pud, addr);
97923+ pmd = (mm == &init_mm) ?
97924+ pmd_alloc_kernel(mm, pud, addr) :
97925+ pmd_alloc(mm, pud, addr);
97926 if (!pmd)
97927 return -ENOMEM;
97928 do {
97929@@ -1860,7 +1884,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
97930 unsigned long next;
97931 int err;
97932
97933- pud = pud_alloc(mm, pgd, addr);
97934+ pud = (mm == &init_mm) ?
97935+ pud_alloc_kernel(mm, pgd, addr) :
97936+ pud_alloc(mm, pgd, addr);
97937 if (!pud)
97938 return -ENOMEM;
97939 do {
97940@@ -1982,6 +2008,185 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
97941 return ret;
97942 }
97943
97944+#ifdef CONFIG_PAX_SEGMEXEC
97945+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
97946+{
97947+ struct mm_struct *mm = vma->vm_mm;
97948+ spinlock_t *ptl;
97949+ pte_t *pte, entry;
97950+
97951+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
97952+ entry = *pte;
97953+ if (!pte_present(entry)) {
97954+ if (!pte_none(entry)) {
97955+ free_swap_and_cache(pte_to_swp_entry(entry));
97956+ pte_clear_not_present_full(mm, address, pte, 0);
97957+ }
97958+ } else {
97959+ struct page *page;
97960+
97961+ flush_cache_page(vma, address, pte_pfn(entry));
97962+ entry = ptep_clear_flush(vma, address, pte);
97963+ BUG_ON(pte_dirty(entry));
97964+ page = vm_normal_page(vma, address, entry);
97965+ if (page) {
97966+ update_hiwater_rss(mm);
97967+ if (PageAnon(page))
97968+ dec_mm_counter_fast(mm, MM_ANONPAGES);
97969+ else
97970+ dec_mm_counter_fast(mm, MM_FILEPAGES);
97971+ page_remove_rmap(page);
97972+ page_cache_release(page);
97973+ }
97974+ }
97975+ pte_unmap_unlock(pte, ptl);
97976+}
97977+
97978+/* PaX: if vma is mirrored, synchronize the mirror's PTE
97979+ *
97980+ * the ptl of the lower mapped page is held on entry and is not released
97981+ * on exit or inside, so that changes to the PTE state (swapout, mremap, munmap, etc.) stay atomic
97982+ */
97983+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
97984+{
97985+ struct mm_struct *mm = vma->vm_mm;
97986+ unsigned long address_m;
97987+ spinlock_t *ptl_m;
97988+ struct vm_area_struct *vma_m;
97989+ pmd_t *pmd_m;
97990+ pte_t *pte_m, entry_m;
97991+
97992+ BUG_ON(!page_m || !PageAnon(page_m));
97993+
97994+ vma_m = pax_find_mirror_vma(vma);
97995+ if (!vma_m)
97996+ return;
97997+
97998+ BUG_ON(!PageLocked(page_m));
97999+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
98000+ address_m = address + SEGMEXEC_TASK_SIZE;
98001+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
98002+ pte_m = pte_offset_map(pmd_m, address_m);
98003+ ptl_m = pte_lockptr(mm, pmd_m);
98004+ if (ptl != ptl_m) {
98005+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
98006+ if (!pte_none(*pte_m))
98007+ goto out;
98008+ }
98009+
98010+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
98011+ page_cache_get(page_m);
98012+ page_add_anon_rmap(page_m, vma_m, address_m);
98013+ inc_mm_counter_fast(mm, MM_ANONPAGES);
98014+ set_pte_at(mm, address_m, pte_m, entry_m);
98015+ update_mmu_cache(vma_m, address_m, pte_m);
98016+out:
98017+ if (ptl != ptl_m)
98018+ spin_unlock(ptl_m);
98019+ pte_unmap(pte_m);
98020+ unlock_page(page_m);
98021+}
98022+
98023+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
98024+{
98025+ struct mm_struct *mm = vma->vm_mm;
98026+ unsigned long address_m;
98027+ spinlock_t *ptl_m;
98028+ struct vm_area_struct *vma_m;
98029+ pmd_t *pmd_m;
98030+ pte_t *pte_m, entry_m;
98031+
98032+ BUG_ON(!page_m || PageAnon(page_m));
98033+
98034+ vma_m = pax_find_mirror_vma(vma);
98035+ if (!vma_m)
98036+ return;
98037+
98038+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
98039+ address_m = address + SEGMEXEC_TASK_SIZE;
98040+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
98041+ pte_m = pte_offset_map(pmd_m, address_m);
98042+ ptl_m = pte_lockptr(mm, pmd_m);
98043+ if (ptl != ptl_m) {
98044+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
98045+ if (!pte_none(*pte_m))
98046+ goto out;
98047+ }
98048+
98049+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
98050+ page_cache_get(page_m);
98051+ page_add_file_rmap(page_m);
98052+ inc_mm_counter_fast(mm, MM_FILEPAGES);
98053+ set_pte_at(mm, address_m, pte_m, entry_m);
98054+ update_mmu_cache(vma_m, address_m, pte_m);
98055+out:
98056+ if (ptl != ptl_m)
98057+ spin_unlock(ptl_m);
98058+ pte_unmap(pte_m);
98059+}
98060+
98061+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
98062+{
98063+ struct mm_struct *mm = vma->vm_mm;
98064+ unsigned long address_m;
98065+ spinlock_t *ptl_m;
98066+ struct vm_area_struct *vma_m;
98067+ pmd_t *pmd_m;
98068+ pte_t *pte_m, entry_m;
98069+
98070+ vma_m = pax_find_mirror_vma(vma);
98071+ if (!vma_m)
98072+ return;
98073+
98074+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
98075+ address_m = address + SEGMEXEC_TASK_SIZE;
98076+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
98077+ pte_m = pte_offset_map(pmd_m, address_m);
98078+ ptl_m = pte_lockptr(mm, pmd_m);
98079+ if (ptl != ptl_m) {
98080+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
98081+ if (!pte_none(*pte_m))
98082+ goto out;
98083+ }
98084+
98085+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
98086+ set_pte_at(mm, address_m, pte_m, entry_m);
98087+out:
98088+ if (ptl != ptl_m)
98089+ spin_unlock(ptl_m);
98090+ pte_unmap(pte_m);
98091+}
98092+
98093+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
98094+{
98095+ struct page *page_m;
98096+ pte_t entry;
98097+
98098+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
98099+ goto out;
98100+
98101+ entry = *pte;
98102+ page_m = vm_normal_page(vma, address, entry);
98103+ if (!page_m)
98104+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
98105+ else if (PageAnon(page_m)) {
98106+ if (pax_find_mirror_vma(vma)) {
98107+ pte_unmap_unlock(pte, ptl);
98108+ lock_page(page_m);
98109+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
98110+ if (pte_same(entry, *pte))
98111+ pax_mirror_anon_pte(vma, address, page_m, ptl);
98112+ else
98113+ unlock_page(page_m);
98114+ }
98115+ } else
98116+ pax_mirror_file_pte(vma, address, page_m, ptl);
98117+
98118+out:
98119+ pte_unmap_unlock(pte, ptl);
98120+}
98121+#endif
98122+
98123 /*
98124 * This routine handles present pages, when users try to write
98125 * to a shared page. It is done by copying the page to a new address
98126@@ -2172,6 +2377,12 @@ gotten:
98127 */
98128 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
98129 if (likely(pte_same(*page_table, orig_pte))) {
98130+
98131+#ifdef CONFIG_PAX_SEGMEXEC
98132+ if (pax_find_mirror_vma(vma))
98133+ BUG_ON(!trylock_page(new_page));
98134+#endif
98135+
98136 if (old_page) {
98137 if (!PageAnon(old_page)) {
98138 dec_mm_counter_fast(mm, MM_FILEPAGES);
98139@@ -2225,6 +2436,10 @@ gotten:
98140 page_remove_rmap(old_page);
98141 }
98142
98143+#ifdef CONFIG_PAX_SEGMEXEC
98144+ pax_mirror_anon_pte(vma, address, new_page, ptl);
98145+#endif
98146+
98147 /* Free the old page.. */
98148 new_page = old_page;
98149 ret |= VM_FAULT_WRITE;
98150@@ -2483,6 +2698,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
98151 swap_free(entry);
98152 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
98153 try_to_free_swap(page);
98154+
98155+#ifdef CONFIG_PAX_SEGMEXEC
98156+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
98157+#endif
98158+
98159 unlock_page(page);
98160 if (page != swapcache) {
98161 /*
98162@@ -2506,6 +2726,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
98163
98164 /* No need to invalidate - it was non-present before */
98165 update_mmu_cache(vma, address, page_table);
98166+
98167+#ifdef CONFIG_PAX_SEGMEXEC
98168+ pax_mirror_anon_pte(vma, address, page, ptl);
98169+#endif
98170+
98171 unlock:
98172 pte_unmap_unlock(page_table, ptl);
98173 out:
98174@@ -2525,40 +2750,6 @@ out_release:
98175 }
98176
98177 /*
98178- * This is like a special single-page "expand_{down|up}wards()",
98179- * except we must first make sure that 'address{-|+}PAGE_SIZE'
98180- * doesn't hit another vma.
98181- */
98182-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
98183-{
98184- address &= PAGE_MASK;
98185- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
98186- struct vm_area_struct *prev = vma->vm_prev;
98187-
98188- /*
98189- * Is there a mapping abutting this one below?
98190- *
98191- * That's only ok if it's the same stack mapping
98192- * that has gotten split..
98193- */
98194- if (prev && prev->vm_end == address)
98195- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
98196-
98197- return expand_downwards(vma, address - PAGE_SIZE);
98198- }
98199- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
98200- struct vm_area_struct *next = vma->vm_next;
98201-
98202- /* As VM_GROWSDOWN but s/below/above/ */
98203- if (next && next->vm_start == address + PAGE_SIZE)
98204- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
98205-
98206- return expand_upwards(vma, address + PAGE_SIZE);
98207- }
98208- return 0;
98209-}
98210-
98211-/*
98212 * We enter with non-exclusive mmap_sem (to exclude vma changes,
98213 * but allow concurrent faults), and pte mapped but not yet locked.
98214 * We return with mmap_sem still held, but pte unmapped and unlocked.
98215@@ -2568,27 +2759,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
98216 unsigned int flags)
98217 {
98218 struct mem_cgroup *memcg;
98219- struct page *page;
98220+ struct page *page = NULL;
98221 spinlock_t *ptl;
98222 pte_t entry;
98223
98224- pte_unmap(page_table);
98225-
98226- /* Check if we need to add a guard page to the stack */
98227- if (check_stack_guard_page(vma, address) < 0)
98228- return VM_FAULT_SIGSEGV;
98229-
98230- /* Use the zero-page for reads */
98231 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
98232 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
98233 vma->vm_page_prot));
98234- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
98235+ ptl = pte_lockptr(mm, pmd);
98236+ spin_lock(ptl);
98237 if (!pte_none(*page_table))
98238 goto unlock;
98239 goto setpte;
98240 }
98241
98242 /* Allocate our own private page. */
98243+ pte_unmap(page_table);
98244+
98245 if (unlikely(anon_vma_prepare(vma)))
98246 goto oom;
98247 page = alloc_zeroed_user_highpage_movable(vma, address);
98248@@ -2612,6 +2799,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
98249 if (!pte_none(*page_table))
98250 goto release;
98251
98252+#ifdef CONFIG_PAX_SEGMEXEC
98253+ if (pax_find_mirror_vma(vma))
98254+ BUG_ON(!trylock_page(page));
98255+#endif
98256+
98257 inc_mm_counter_fast(mm, MM_ANONPAGES);
98258 page_add_new_anon_rmap(page, vma, address);
98259 mem_cgroup_commit_charge(page, memcg, false);
98260@@ -2621,6 +2813,12 @@ setpte:
98261
98262 /* No need to invalidate - it was non-present before */
98263 update_mmu_cache(vma, address, page_table);
98264+
98265+#ifdef CONFIG_PAX_SEGMEXEC
98266+ if (page)
98267+ pax_mirror_anon_pte(vma, address, page, ptl);
98268+#endif
98269+
98270 unlock:
98271 pte_unmap_unlock(page_table, ptl);
98272 return 0;
98273@@ -2853,6 +3051,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
98274 return ret;
98275 }
98276 do_set_pte(vma, address, fault_page, pte, false, false);
98277+
98278+#ifdef CONFIG_PAX_SEGMEXEC
98279+ pax_mirror_file_pte(vma, address, fault_page, ptl);
98280+#endif
98281+
98282 unlock_page(fault_page);
98283 unlock_out:
98284 pte_unmap_unlock(pte, ptl);
98285@@ -2904,7 +3107,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
98286 }
98287 goto uncharge_out;
98288 }
98289+
98290+#ifdef CONFIG_PAX_SEGMEXEC
98291+ if (pax_find_mirror_vma(vma))
98292+ BUG_ON(!trylock_page(new_page));
98293+#endif
98294+
98295 do_set_pte(vma, address, new_page, pte, true, true);
98296+
98297+#ifdef CONFIG_PAX_SEGMEXEC
98298+ pax_mirror_anon_pte(vma, address, new_page, ptl);
98299+#endif
98300+
98301 mem_cgroup_commit_charge(new_page, memcg, false);
98302 lru_cache_add_active_or_unevictable(new_page, vma);
98303 pte_unmap_unlock(pte, ptl);
98304@@ -2962,6 +3176,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
98305 return ret;
98306 }
98307 do_set_pte(vma, address, fault_page, pte, true, false);
98308+
98309+#ifdef CONFIG_PAX_SEGMEXEC
98310+ pax_mirror_file_pte(vma, address, fault_page, ptl);
98311+#endif
98312+
98313 pte_unmap_unlock(pte, ptl);
98314
98315 if (set_page_dirty(fault_page))
98316@@ -3185,6 +3404,12 @@ static int handle_pte_fault(struct mm_struct *mm,
98317 if (flags & FAULT_FLAG_WRITE)
98318 flush_tlb_fix_spurious_fault(vma, address);
98319 }
98320+
98321+#ifdef CONFIG_PAX_SEGMEXEC
98322+ pax_mirror_pte(vma, address, pte, pmd, ptl);
98323+ return 0;
98324+#endif
98325+
98326 unlock:
98327 pte_unmap_unlock(pte, ptl);
98328 return 0;
98329@@ -3204,9 +3429,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
98330 pmd_t *pmd;
98331 pte_t *pte;
98332
98333+#ifdef CONFIG_PAX_SEGMEXEC
98334+ struct vm_area_struct *vma_m;
98335+#endif
98336+
98337 if (unlikely(is_vm_hugetlb_page(vma)))
98338 return hugetlb_fault(mm, vma, address, flags);
98339
98340+#ifdef CONFIG_PAX_SEGMEXEC
98341+ vma_m = pax_find_mirror_vma(vma);
98342+ if (vma_m) {
98343+ unsigned long address_m;
98344+ pgd_t *pgd_m;
98345+ pud_t *pud_m;
98346+ pmd_t *pmd_m;
98347+
98348+ if (vma->vm_start > vma_m->vm_start) {
98349+ address_m = address;
98350+ address -= SEGMEXEC_TASK_SIZE;
98351+ vma = vma_m;
98352+ } else
98353+ address_m = address + SEGMEXEC_TASK_SIZE;
98354+
98355+ pgd_m = pgd_offset(mm, address_m);
98356+ pud_m = pud_alloc(mm, pgd_m, address_m);
98357+ if (!pud_m)
98358+ return VM_FAULT_OOM;
98359+ pmd_m = pmd_alloc(mm, pud_m, address_m);
98360+ if (!pmd_m)
98361+ return VM_FAULT_OOM;
98362+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
98363+ return VM_FAULT_OOM;
98364+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
98365+ }
98366+#endif
98367+
98368 pgd = pgd_offset(mm, address);
98369 pud = pud_alloc(mm, pgd, address);
98370 if (!pud)
98371@@ -3341,6 +3598,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
98372 spin_unlock(&mm->page_table_lock);
98373 return 0;
98374 }
98375+
98376+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
98377+{
98378+ pud_t *new = pud_alloc_one(mm, address);
98379+ if (!new)
98380+ return -ENOMEM;
98381+
98382+ smp_wmb(); /* See comment in __pte_alloc */
98383+
98384+ spin_lock(&mm->page_table_lock);
98385+ if (pgd_present(*pgd)) /* Another has populated it */
98386+ pud_free(mm, new);
98387+ else
98388+ pgd_populate_kernel(mm, pgd, new);
98389+ spin_unlock(&mm->page_table_lock);
98390+ return 0;
98391+}
98392 #endif /* __PAGETABLE_PUD_FOLDED */
98393
98394 #ifndef __PAGETABLE_PMD_FOLDED
98395@@ -3373,6 +3647,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
98396 spin_unlock(&mm->page_table_lock);
98397 return 0;
98398 }
98399+
98400+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
98401+{
98402+ pmd_t *new = pmd_alloc_one(mm, address);
98403+ if (!new)
98404+ return -ENOMEM;
98405+
98406+ smp_wmb(); /* See comment in __pte_alloc */
98407+
98408+ spin_lock(&mm->page_table_lock);
98409+#ifndef __ARCH_HAS_4LEVEL_HACK
98410+ if (!pud_present(*pud)) {
98411+ mm_inc_nr_pmds(mm);
98412+ pud_populate_kernel(mm, pud, new);
98413+ } else /* Another has populated it */
98414+ pmd_free(mm, new);
98415+#else
98416+ if (!pgd_present(*pud)) {
98417+ mm_inc_nr_pmds(mm);
98418+ pgd_populate_kernel(mm, pud, new);
98419+ } else /* Another has populated it */
98420+ pmd_free(mm, new);
98421+#endif /* __ARCH_HAS_4LEVEL_HACK */
98422+ spin_unlock(&mm->page_table_lock);
98423+ return 0;
98424+}
98425 #endif /* __PAGETABLE_PMD_FOLDED */
98426
98427 static int __follow_pte(struct mm_struct *mm, unsigned long address,
98428@@ -3482,8 +3782,8 @@ out:
98429 return ret;
98430 }
98431
98432-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
98433- void *buf, int len, int write)
98434+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
98435+ void *buf, size_t len, int write)
98436 {
98437 resource_size_t phys_addr;
98438 unsigned long prot = 0;
98439@@ -3509,8 +3809,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
98440 * Access another process' address space as given in mm. If non-NULL, use the
98441 * given task for page fault accounting.
98442 */
98443-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98444- unsigned long addr, void *buf, int len, int write)
98445+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98446+ unsigned long addr, void *buf, size_t len, int write)
98447 {
98448 struct vm_area_struct *vma;
98449 void *old_buf = buf;
98450@@ -3518,7 +3818,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98451 down_read(&mm->mmap_sem);
98452 /* ignore errors, just check how much was successfully transferred */
98453 while (len) {
98454- int bytes, ret, offset;
98455+ ssize_t bytes, ret, offset;
98456 void *maddr;
98457 struct page *page = NULL;
98458
98459@@ -3579,8 +3879,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98460 *
98461 * The caller must hold a reference on @mm.
98462 */
98463-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98464- void *buf, int len, int write)
98465+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98466+ void *buf, size_t len, int write)
98467 {
98468 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98469 }
98470@@ -3590,11 +3890,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98471 * Source/target buffer must be kernel space,
98472 * Do not walk the page table directly, use get_user_pages
98473 */
98474-int access_process_vm(struct task_struct *tsk, unsigned long addr,
98475- void *buf, int len, int write)
98476+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
98477+ void *buf, size_t len, int write)
98478 {
98479 struct mm_struct *mm;
98480- int ret;
98481+ ssize_t ret;
98482
98483 mm = get_task_mm(tsk);
98484 if (!mm)
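
Summing up the large mm/memory.c additions above: under SEGMEXEC the userland address space is split in half, the lower half holding the data view of each mapping and the upper half an executable mirror at a fixed offset, so the CS segment limit can enforce non-executability on 32-bit x86 without NX hardware. Every fault path therefore calls a pax_mirror_*_pte() helper to install the matching PTE in the mirror. The arithmetic itself is a one-liner; a sketch with an assumed i386 constant:

/* Sketch of the mirror arithmetic; the constant is the assumed i386
 * value (TASK_SIZE / 2 = 1.5GB). */
#define SEGMEXEC_TASK_SIZE_SKETCH 0x60000000UL

static unsigned long mirror_address(unsigned long address)
{
	/* the helpers above BUG_ON(address >= SEGMEXEC_TASK_SIZE) first */
	return address + SEGMEXEC_TASK_SIZE_SKETCH;
}
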
98485diff --git a/mm/mempolicy.c b/mm/mempolicy.c
98486index de5dc5e..68a4ea3 100644
98487--- a/mm/mempolicy.c
98488+++ b/mm/mempolicy.c
98489@@ -703,6 +703,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
98490 unsigned long vmstart;
98491 unsigned long vmend;
98492
98493+#ifdef CONFIG_PAX_SEGMEXEC
98494+ struct vm_area_struct *vma_m;
98495+#endif
98496+
98497 vma = find_vma(mm, start);
98498 if (!vma || vma->vm_start > start)
98499 return -EFAULT;
98500@@ -746,6 +750,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
98501 err = vma_replace_policy(vma, new_pol);
98502 if (err)
98503 goto out;
98504+
98505+#ifdef CONFIG_PAX_SEGMEXEC
98506+ vma_m = pax_find_mirror_vma(vma);
98507+ if (vma_m) {
98508+ err = vma_replace_policy(vma_m, new_pol);
98509+ if (err)
98510+ goto out;
98511+ }
98512+#endif
98513+
98514 }
98515
98516 out:
98517@@ -1160,6 +1174,17 @@ static long do_mbind(unsigned long start, unsigned long len,
98518
98519 if (end < start)
98520 return -EINVAL;
98521+
98522+#ifdef CONFIG_PAX_SEGMEXEC
98523+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
98524+ if (end > SEGMEXEC_TASK_SIZE)
98525+ return -EINVAL;
98526+ } else
98527+#endif
98528+
98529+ if (end > TASK_SIZE)
98530+ return -EINVAL;
98531+
98532 if (end == start)
98533 return 0;
98534
98535@@ -1385,8 +1410,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
98536 */
98537 tcred = __task_cred(task);
98538 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
98539- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
98540- !capable(CAP_SYS_NICE)) {
98541+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
98542 rcu_read_unlock();
98543 err = -EPERM;
98544 goto out_put;
98545@@ -1417,6 +1441,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
98546 goto out;
98547 }
98548
98549+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
98550+ if (mm != current->mm &&
98551+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
98552+ mmput(mm);
98553+ err = -EPERM;
98554+ goto out;
98555+ }
98556+#endif
98557+
98558 err = do_migrate_pages(mm, old, new,
98559 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
98560
98561diff --git a/mm/migrate.c b/mm/migrate.c
98562index 85e0426..be49beb 100644
98563--- a/mm/migrate.c
98564+++ b/mm/migrate.c
98565@@ -1472,8 +1472,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
98566 */
98567 tcred = __task_cred(task);
98568 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
98569- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
98570- !capable(CAP_SYS_NICE)) {
98571+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
98572 rcu_read_unlock();
98573 err = -EPERM;
98574 goto out;
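
The identical mempolicy/migrate hunks drop the `uid_eq(cred->uid, tcred->uid)` clause from the permission check, so a matching real uid alone no longer authorizes acting on the target task. A before/after sketch of the simplified predicate (uid comparison flattened to == for readability):

/* Before/after sketch of the simplified predicate. */
#include <sys/types.h>

static int may_act_before(uid_t euid, uid_t uid, uid_t tsuid, uid_t tuid,
			  int has_nice_cap)
{
	return euid == tsuid || euid == tuid ||
	       uid == tsuid || uid == tuid || has_nice_cap;
}

static int may_act_after(uid_t euid, uid_t uid, uid_t tsuid, uid_t tuid,
			 int has_nice_cap)
{
	return euid == tsuid || euid == tuid ||
	       uid == tsuid || has_nice_cap;	/* real==real dropped */
}
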
98575diff --git a/mm/mlock.c b/mm/mlock.c
98576index 8a54cd2..92f1747 100644
98577--- a/mm/mlock.c
98578+++ b/mm/mlock.c
98579@@ -14,6 +14,7 @@
98580 #include <linux/pagevec.h>
98581 #include <linux/mempolicy.h>
98582 #include <linux/syscalls.h>
98583+#include <linux/security.h>
98584 #include <linux/sched.h>
98585 #include <linux/export.h>
98586 #include <linux/rmap.h>
98587@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
98588 {
98589 unsigned long nstart, end, tmp;
98590 struct vm_area_struct * vma, * prev;
98591- int error;
98592+ int error = 0;
98593
98594 VM_BUG_ON(start & ~PAGE_MASK);
98595 VM_BUG_ON(len != PAGE_ALIGN(len));
98596@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
98597 return -EINVAL;
98598 if (end == start)
98599 return 0;
98600+ if (end > TASK_SIZE)
98601+ return -EINVAL;
98602+
98603 vma = find_vma(current->mm, start);
98604 if (!vma || vma->vm_start > start)
98605 return -ENOMEM;
98606@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
98607 for (nstart = start ; ; ) {
98608 vm_flags_t newflags;
98609
98610+#ifdef CONFIG_PAX_SEGMEXEC
98611+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
98612+ break;
98613+#endif
98614+
98615 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
98616
98617 newflags = vma->vm_flags & ~VM_LOCKED;
98618@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
98619 locked += current->mm->locked_vm;
98620
98621 /* check against resource limits */
98622+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
98623 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
98624 error = do_mlock(start, len, 1);
98625
98626@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
98627 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
98628 vm_flags_t newflags;
98629
98630+#ifdef CONFIG_PAX_SEGMEXEC
98631+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
98632+ break;
98633+#endif
98634+
98635 newflags = vma->vm_flags & ~VM_LOCKED;
98636 if (flags & MCL_CURRENT)
98637 newflags |= VM_LOCKED;
98638@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
98639 lock_limit >>= PAGE_SHIFT;
98640
98641 ret = -ENOMEM;
98642+
98643+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
98644+
98645 down_write(&current->mm->mmap_sem);
98646-
98647 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
98648 capable(CAP_IPC_LOCK))
98649 ret = do_mlockall(flags);
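
Around the mlock hunks above (gr_learn_resource() again records the attempted lock size for learning mode), the upstream limit itself can be exercised from userspace: locking beyond RLIMIT_MEMLOCK without CAP_IPC_LOCK fails with ENOMEM. A runnable demo:

/* Runnable demo: locking more memory than RLIMIT_MEMLOCK allows fails
 * with ENOMEM for a process without CAP_IPC_LOCK. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	size_t len = 16 * 4096;
	void *p = malloc(len);

	setrlimit(RLIMIT_MEMLOCK, &rl);
	if (p && mlock(p, len) != 0)
		printf("mlock refused: %s\n", strerror(errno));
	free(p);
	return 0;
}
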
98650diff --git a/mm/mm_init.c b/mm/mm_init.c
98651index 5f420f7..dd42fb1b 100644
98652--- a/mm/mm_init.c
98653+++ b/mm/mm_init.c
98654@@ -177,7 +177,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
98655 return NOTIFY_OK;
98656 }
98657
98658-static struct notifier_block compute_batch_nb __meminitdata = {
98659+static struct notifier_block compute_batch_nb __meminitconst = {
98660 .notifier_call = mm_compute_batch_notifier,
98661 .priority = IPC_CALLBACK_PRI, /* use lowest priority */
98662 };
98663diff --git a/mm/mmap.c b/mm/mmap.c
98664index 9ec50a3..0476e2d 100644
98665--- a/mm/mmap.c
98666+++ b/mm/mmap.c
98667@@ -41,6 +41,7 @@
98668 #include <linux/notifier.h>
98669 #include <linux/memory.h>
98670 #include <linux/printk.h>
98671+#include <linux/random.h>
98672
98673 #include <asm/uaccess.h>
98674 #include <asm/cacheflush.h>
98675@@ -57,6 +58,16 @@
98676 #define arch_rebalance_pgtables(addr, len) (addr)
98677 #endif
98678
98679+static inline void verify_mm_writelocked(struct mm_struct *mm)
98680+{
98681+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
98682+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
98683+ up_read(&mm->mmap_sem);
98684+ BUG();
98685+ }
98686+#endif
98687+}
98688+
98689 static void unmap_region(struct mm_struct *mm,
98690 struct vm_area_struct *vma, struct vm_area_struct *prev,
98691 unsigned long start, unsigned long end);
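
verify_mm_writelocked() above asserts that the caller holds mmap_sem for writing by exploiting rwsem semantics: a read-trylock can only succeed while no writer holds the lock, so success means the precondition was violated and the kernel BUG()s (after releasing the read lock to avoid leaking it). A usage sketch in the style of its caller mmap_region():

/* Usage sketch in the style of mmap_region() below: assert the write
 * lock before touching the mm's mappings. */
static void example_mm_mutator(struct mm_struct *mm)
{
	verify_mm_writelocked(mm);	/* read-trylock success => no writer => BUG */
	/* ... safe to walk and modify mm->mmap here ... */
}
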
98692@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
98693 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
98694 *
98695 */
98696-pgprot_t protection_map[16] = {
98697+pgprot_t protection_map[16] __read_only = {
98698 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
98699 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
98700 };
98701
98702-pgprot_t vm_get_page_prot(unsigned long vm_flags)
98703+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
98704 {
98705- return __pgprot(pgprot_val(protection_map[vm_flags &
98706+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
98707 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
98708 pgprot_val(arch_vm_get_page_prot(vm_flags)));
98709+
98710+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
98711+ if (!(__supported_pte_mask & _PAGE_NX) &&
98712+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
98713+ (vm_flags & (VM_READ | VM_WRITE)))
98714+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
98715+#endif
98716+
98717+ return prot;
98718 }
98719 EXPORT_SYMBOL(vm_get_page_prot);
98720
98721@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
98722 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
98723 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98724 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98725+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
98726 /*
98727 * Make sure vm_committed_as in one cacheline and not cacheline shared with
98728 * other variables. It can be updated by several CPUs frequently.
98729@@ -271,6 +292,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
98730 struct vm_area_struct *next = vma->vm_next;
98731
98732 might_sleep();
98733+ BUG_ON(vma->vm_mirror);
98734 if (vma->vm_ops && vma->vm_ops->close)
98735 vma->vm_ops->close(vma);
98736 if (vma->vm_file)
98737@@ -284,6 +306,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
98738
98739 SYSCALL_DEFINE1(brk, unsigned long, brk)
98740 {
98741+ unsigned long rlim;
98742 unsigned long retval;
98743 unsigned long newbrk, oldbrk;
98744 struct mm_struct *mm = current->mm;
98745@@ -314,7 +337,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
98746 	 * segment grow beyond its set limit in the case where the limit is
98747 * not page aligned -Ram Gupta
98748 */
98749- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
98750+ rlim = rlimit(RLIMIT_DATA);
98751+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
98752+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
98753+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
98754+ rlim = 4096 * PAGE_SIZE;
98755+#endif
98756+ if (check_data_rlimit(rlim, brk, mm->start_brk,
98757 mm->end_data, mm->start_data))
98758 goto out;
98759
98760@@ -967,6 +996,12 @@ static int
98761 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
98762 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
98763 {
98764+
98765+#ifdef CONFIG_PAX_SEGMEXEC
98766+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
98767+ return 0;
98768+#endif
98769+
98770 if (is_mergeable_vma(vma, file, vm_flags) &&
98771 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
98772 if (vma->vm_pgoff == vm_pgoff)
98773@@ -986,6 +1021,12 @@ static int
98774 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
98775 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
98776 {
98777+
98778+#ifdef CONFIG_PAX_SEGMEXEC
98779+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
98780+ return 0;
98781+#endif
98782+
98783 if (is_mergeable_vma(vma, file, vm_flags) &&
98784 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
98785 pgoff_t vm_pglen;
98786@@ -1035,6 +1076,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
98787 struct vm_area_struct *area, *next;
98788 int err;
98789
98790+#ifdef CONFIG_PAX_SEGMEXEC
98791+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
98792+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
98793+
98794+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
98795+#endif
98796+
98797 /*
98798 * We later require that vma->vm_flags == vm_flags,
98799 * so this tests vma->vm_flags & VM_SPECIAL, too.
98800@@ -1050,6 +1098,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
98801 if (next && next->vm_end == end) /* cases 6, 7, 8 */
98802 next = next->vm_next;
98803
98804+#ifdef CONFIG_PAX_SEGMEXEC
98805+ if (prev)
98806+ prev_m = pax_find_mirror_vma(prev);
98807+ if (area)
98808+ area_m = pax_find_mirror_vma(area);
98809+ if (next)
98810+ next_m = pax_find_mirror_vma(next);
98811+#endif
98812+
98813 /*
98814 * Can it merge with the predecessor?
98815 */
98816@@ -1069,9 +1126,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
98817 /* cases 1, 6 */
98818 err = vma_adjust(prev, prev->vm_start,
98819 next->vm_end, prev->vm_pgoff, NULL);
98820- } else /* cases 2, 5, 7 */
98821+
98822+#ifdef CONFIG_PAX_SEGMEXEC
98823+ if (!err && prev_m)
98824+ err = vma_adjust(prev_m, prev_m->vm_start,
98825+ next_m->vm_end, prev_m->vm_pgoff, NULL);
98826+#endif
98827+
98828+ } else { /* cases 2, 5, 7 */
98829 err = vma_adjust(prev, prev->vm_start,
98830 end, prev->vm_pgoff, NULL);
98831+
98832+#ifdef CONFIG_PAX_SEGMEXEC
98833+ if (!err && prev_m)
98834+ err = vma_adjust(prev_m, prev_m->vm_start,
98835+ end_m, prev_m->vm_pgoff, NULL);
98836+#endif
98837+
98838+ }
98839 if (err)
98840 return NULL;
98841 khugepaged_enter_vma_merge(prev, vm_flags);
98842@@ -1085,12 +1157,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
98843 mpol_equal(policy, vma_policy(next)) &&
98844 can_vma_merge_before(next, vm_flags,
98845 anon_vma, file, pgoff+pglen)) {
98846- if (prev && addr < prev->vm_end) /* case 4 */
98847+ if (prev && addr < prev->vm_end) { /* case 4 */
98848 err = vma_adjust(prev, prev->vm_start,
98849 addr, prev->vm_pgoff, NULL);
98850- else /* cases 3, 8 */
98851+
98852+#ifdef CONFIG_PAX_SEGMEXEC
98853+ if (!err && prev_m)
98854+ err = vma_adjust(prev_m, prev_m->vm_start,
98855+ addr_m, prev_m->vm_pgoff, NULL);
98856+#endif
98857+
98858+ } else { /* cases 3, 8 */
98859 err = vma_adjust(area, addr, next->vm_end,
98860 next->vm_pgoff - pglen, NULL);
98861+
98862+#ifdef CONFIG_PAX_SEGMEXEC
98863+ if (!err && area_m)
98864+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
98865+ next_m->vm_pgoff - pglen, NULL);
98866+#endif
98867+
98868+ }
98869 if (err)
98870 return NULL;
98871 khugepaged_enter_vma_merge(area, vm_flags);
98872@@ -1199,8 +1286,10 @@ none:
98873 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
98874 struct file *file, long pages)
98875 {
98876- const unsigned long stack_flags
98877- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
98878+
98879+#ifdef CONFIG_PAX_RANDMMAP
98880+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
98881+#endif
98882
98883 mm->total_vm += pages;
98884
98885@@ -1208,7 +1297,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
98886 mm->shared_vm += pages;
98887 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
98888 mm->exec_vm += pages;
98889- } else if (flags & stack_flags)
98890+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
98891 mm->stack_vm += pages;
98892 }
98893 #endif /* CONFIG_PROC_FS */
98894@@ -1238,6 +1327,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
98895 locked += mm->locked_vm;
98896 lock_limit = rlimit(RLIMIT_MEMLOCK);
98897 lock_limit >>= PAGE_SHIFT;
98898+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
98899 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
98900 return -EAGAIN;
98901 }
98902@@ -1264,7 +1354,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98903 * (the exception is when the underlying filesystem is noexec
98904 * mounted, in which case we dont add PROT_EXEC.)
98905 */
98906- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98907+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98908 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
98909 prot |= PROT_EXEC;
98910
98911@@ -1290,7 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98912 /* Obtain the address to map to. we verify (or select) it and ensure
98913 * that it represents a valid section of the address space.
98914 */
98915- addr = get_unmapped_area(file, addr, len, pgoff, flags);
98916+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
98917 if (addr & ~PAGE_MASK)
98918 return addr;
98919
98920@@ -1301,6 +1391,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98921 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
98922 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
98923
98924+#ifdef CONFIG_PAX_MPROTECT
98925+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98926+
98927+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
98928+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
98929+ mm->binfmt->handle_mmap)
98930+ mm->binfmt->handle_mmap(file);
98931+#endif
98932+
98933+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98934+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
98935+ gr_log_rwxmmap(file);
98936+
98937+#ifdef CONFIG_PAX_EMUPLT
98938+ vm_flags &= ~VM_EXEC;
98939+#else
98940+ return -EPERM;
98941+#endif
98942+
98943+ }
98944+
98945+ if (!(vm_flags & VM_EXEC))
98946+ vm_flags &= ~VM_MAYEXEC;
98947+#else
98948+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98949+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98950+#endif
98951+ else
98952+ vm_flags &= ~VM_MAYWRITE;
98953+ }
98954+#endif
98955+
98956+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
98957+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
98958+ vm_flags &= ~VM_PAGEEXEC;
98959+#endif
98960+
98961 if (flags & MAP_LOCKED)
98962 if (!can_do_mlock())
98963 return -EPERM;
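
The PAX_MPROTECT block above enforces W^X at mmap() time: a mapping requesting both VM_WRITE and VM_EXEC is logged and either de-executed (EMUPLT) or refused, and the unused VM_MAYEXEC or VM_MAYWRITE bit is stripped so the exclusion cannot later be undone through mprotect(). A minimal standalone sketch of the strict (non-COMPAT, non-EMUPLT) flag transform; the VM_* values and enforce_mprotect() are simplified stand-ins, not kernel API:

#include <stdio.h>

/* simplified stand-ins for the kernel's VM_* bits */
#define VM_WRITE    0x2u
#define VM_EXEC     0x4u
#define VM_MAYWRITE 0x20u
#define VM_MAYEXEC  0x40u

/* Strict mode: returns the enforced flags, or -1 meaning "deny the mmap". */
static long enforce_mprotect(unsigned long vm_flags)
{
    if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
        return -1;                    /* W|X requested: refuse */
    if (!(vm_flags & VM_EXEC))
        vm_flags &= ~VM_MAYEXEC;      /* can never become executable */
    else
        vm_flags &= ~VM_MAYWRITE;     /* can never become writable */
    return (long)vm_flags;
}

int main(void)
{
    printf("%ld\n", enforce_mprotect(VM_WRITE | VM_MAYWRITE | VM_MAYEXEC)); /* 34 */
    printf("%ld\n", enforce_mprotect(VM_EXEC  | VM_MAYWRITE | VM_MAYEXEC)); /* 68 */
    printf("%ld\n", enforce_mprotect(VM_WRITE | VM_EXEC));                  /* -1 */
    return 0;
}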
98964@@ -1388,6 +1515,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98965 vm_flags |= VM_NORESERVE;
98966 }
98967
98968+ if (!gr_acl_handle_mmap(file, prot))
98969+ return -EACCES;
98970+
98971 addr = mmap_region(file, addr, len, vm_flags, pgoff);
98972 if (!IS_ERR_VALUE(addr) &&
98973 ((vm_flags & VM_LOCKED) ||
98974@@ -1481,7 +1611,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
98975 vm_flags_t vm_flags = vma->vm_flags;
98976
98977 /* If it was private or non-writable, the write bit is already clear */
98978- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
98979+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
98980 return 0;
98981
98982 /* The backer wishes to know when pages are first written to? */
98983@@ -1532,7 +1662,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
98984 struct rb_node **rb_link, *rb_parent;
98985 unsigned long charged = 0;
98986
98987+#ifdef CONFIG_PAX_SEGMEXEC
98988+ struct vm_area_struct *vma_m = NULL;
98989+#endif
98990+
98991+ /*
98992+ * mm->mmap_sem is required to protect against another thread
98993+ * changing the mappings in case we sleep.
98994+ */
98995+ verify_mm_writelocked(mm);
98996+
98997 /* Check against address space limit. */
98998+
98999+#ifdef CONFIG_PAX_RANDMMAP
99000+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
99001+#endif
99002+
99003 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
99004 unsigned long nr_pages;
99005
99006@@ -1551,11 +1696,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
99007
99008 /* Clear old maps */
99009 error = -ENOMEM;
99010-munmap_back:
99011 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
99012 if (do_munmap(mm, addr, len))
99013 return -ENOMEM;
99014- goto munmap_back;
99015+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
99016 }
99017
99018 /*
99019@@ -1586,6 +1730,16 @@ munmap_back:
99020 goto unacct_error;
99021 }
99022
99023+#ifdef CONFIG_PAX_SEGMEXEC
99024+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
99025+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
99026+ if (!vma_m) {
99027+ error = -ENOMEM;
99028+ goto free_vma;
99029+ }
99030+ }
99031+#endif
99032+
99033 vma->vm_mm = mm;
99034 vma->vm_start = addr;
99035 vma->vm_end = addr + len;
99036@@ -1616,6 +1770,13 @@ munmap_back:
99037 if (error)
99038 goto unmap_and_free_vma;
99039
99040+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
99041+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
99042+ vma->vm_flags |= VM_PAGEEXEC;
99043+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
99044+ }
99045+#endif
99046+
99047 /* Can addr have changed??
99048 *
99049 * Answer: Yes, several device drivers can do it in their
99050@@ -1634,6 +1795,12 @@ munmap_back:
99051 }
99052
99053 vma_link(mm, vma, prev, rb_link, rb_parent);
99054+
99055+#ifdef CONFIG_PAX_SEGMEXEC
99056+ if (vma_m)
99057+ BUG_ON(pax_mirror_vma(vma_m, vma));
99058+#endif
99059+
99060 /* Once vma denies write, undo our temporary denial count */
99061 if (file) {
99062 if (vm_flags & VM_SHARED)
99063@@ -1646,6 +1813,7 @@ out:
99064 perf_event_mmap(vma);
99065
99066 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
99067+ track_exec_limit(mm, addr, addr + len, vm_flags);
99068 if (vm_flags & VM_LOCKED) {
99069 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
99070 vma == get_gate_vma(current->mm)))
99071@@ -1683,6 +1851,12 @@ allow_write_and_free_vma:
99072 if (vm_flags & VM_DENYWRITE)
99073 allow_write_access(file);
99074 free_vma:
99075+
99076+#ifdef CONFIG_PAX_SEGMEXEC
99077+ if (vma_m)
99078+ kmem_cache_free(vm_area_cachep, vma_m);
99079+#endif
99080+
99081 kmem_cache_free(vm_area_cachep, vma);
99082 unacct_error:
99083 if (charged)
99084@@ -1690,7 +1864,63 @@ unacct_error:
99085 return error;
99086 }
99087
99088-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
99089+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
99090+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
99091+{
99092+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
99093+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
99094+
99095+ return 0;
99096+}
99097+#endif
99098+
99099+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
99100+{
99101+ if (!vma) {
99102+#ifdef CONFIG_STACK_GROWSUP
99103+ if (addr > sysctl_heap_stack_gap)
99104+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
99105+ else
99106+ vma = find_vma(current->mm, 0);
99107+ if (vma && (vma->vm_flags & VM_GROWSUP))
99108+ return false;
99109+#endif
99110+ return true;
99111+ }
99112+
99113+ if (addr + len > vma->vm_start)
99114+ return false;
99115+
99116+ if (vma->vm_flags & VM_GROWSDOWN)
99117+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
99118+#ifdef CONFIG_STACK_GROWSUP
99119+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
99120+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
99121+#endif
99122+ else if (offset)
99123+ return offset <= vma->vm_start - addr - len;
99124+
99125+ return true;
99126+}
99127+
99128+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
99129+{
99130+ if (vma->vm_start < len)
99131+ return -ENOMEM;
99132+
99133+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
99134+ if (offset <= vma->vm_start - len)
99135+ return vma->vm_start - len - offset;
99136+ else
99137+ return -ENOMEM;
99138+ }
99139+
99140+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
99141+ return vma->vm_start - len - sysctl_heap_stack_gap;
99142+ return -ENOMEM;
99143+}
99144+
99145+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
99146 {
99147 /*
99148 * We implement the search by looking for an rbtree node that
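
check_heap_stack_gap() above rejects a fixed-address placement that would leave fewer than sysctl_heap_stack_gap bytes between the new mapping and an adjacent stack VMA, while gr_rand_threadstack_offset() perturbs thread-stack placement by 1 to 256 pages (4 KiB to 1 MiB with 4 KiB pages). A userspace model of the downward-growing case; struct vma, gap_ok() and the gap value here are illustrative stand-ins, and the real check also handles VM_GROWSUP and the threadstack offset:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long start, end; bool grows_down; };

static unsigned long heap_stack_gap = 64UL * 4096;  /* 64 pages, illustrative */

/* May [addr, addr+len) sit below the next-higher VMA? */
static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
    if (!next)
        return true;                 /* nothing above us */
    if (addr + len > next->start)
        return false;                /* plain overlap */
    if (next->grows_down)            /* keep room for stack growth */
        return heap_stack_gap <= next->start - addr - len;
    return true;
}

int main(void)
{
    struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, true };
    printf("%d\n", gap_ok(&stack, stack.start - 65UL * 4096, 4096)); /* 1: gap kept */
    printf("%d\n", gap_ok(&stack, stack.start - 32UL * 4096, 4096)); /* 0: too close */
    return 0;
}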
99149@@ -1738,11 +1968,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
99150 }
99151 }
99152
99153- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
99154+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
99155 check_current:
99156 /* Check if current node has a suitable gap */
99157 if (gap_start > high_limit)
99158 return -ENOMEM;
99159+
99160+ if (gap_end - gap_start > info->threadstack_offset)
99161+ gap_start += info->threadstack_offset;
99162+ else
99163+ gap_start = gap_end;
99164+
99165+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
99166+ if (gap_end - gap_start > sysctl_heap_stack_gap)
99167+ gap_start += sysctl_heap_stack_gap;
99168+ else
99169+ gap_start = gap_end;
99170+ }
99171+ if (vma->vm_flags & VM_GROWSDOWN) {
99172+ if (gap_end - gap_start > sysctl_heap_stack_gap)
99173+ gap_end -= sysctl_heap_stack_gap;
99174+ else
99175+ gap_end = gap_start;
99176+ }
99177 if (gap_end >= low_limit && gap_end - gap_start >= length)
99178 goto found;
99179
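
Inside the gap search, each candidate gap is first shrunk by the caller's threadstack offset and then by sysctl_heap_stack_gap on whichever side borders a stack VMA, so a gap only qualifies if length bytes survive the trimming. A hedged sketch of that arithmetic (usable_gap() is an invented name; the kernel updates gap_start and gap_end in place):

#include <stdio.h>

/* Sketch of the gap-trimming step from the search above; saturates at zero. */
static unsigned long usable_gap(unsigned long gap_start, unsigned long gap_end,
                                unsigned long offset, unsigned long stack_gap)
{
    if (gap_end - gap_start > offset)
        gap_start += offset;         /* random thread-stack offset */
    else
        gap_start = gap_end;
    if (gap_end - gap_start > stack_gap)
        gap_end -= stack_gap;        /* reserve below a GROWSDOWN neighbour */
    else
        gap_end = gap_start;
    return gap_end - gap_start;      /* usable bytes that remain */
}

int main(void)
{
    printf("%lu\n", usable_gap(0, 0x100000, 0x2000, 0x10000)); /* 0xee000 left */
    printf("%lu\n", usable_gap(0, 0x11000, 0x2000, 0x10000));  /* 0: too tight */
    return 0;
}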
99180@@ -1792,7 +2040,7 @@ found:
99181 return gap_start;
99182 }
99183
99184-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
99185+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
99186 {
99187 struct mm_struct *mm = current->mm;
99188 struct vm_area_struct *vma;
99189@@ -1846,6 +2094,24 @@ check_current:
99190 gap_end = vma->vm_start;
99191 if (gap_end < low_limit)
99192 return -ENOMEM;
99193+
99194+ if (gap_end - gap_start > info->threadstack_offset)
99195+ gap_end -= info->threadstack_offset;
99196+ else
99197+ gap_end = gap_start;
99198+
99199+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
99200+ if (gap_end - gap_start > sysctl_heap_stack_gap)
99201+ gap_start += sysctl_heap_stack_gap;
99202+ else
99203+ gap_start = gap_end;
99204+ }
99205+ if (vma->vm_flags & VM_GROWSDOWN) {
99206+ if (gap_end - gap_start > sysctl_heap_stack_gap)
99207+ gap_end -= sysctl_heap_stack_gap;
99208+ else
99209+ gap_end = gap_start;
99210+ }
99211 if (gap_start <= high_limit && gap_end - gap_start >= length)
99212 goto found;
99213
99214@@ -1909,6 +2175,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
99215 struct mm_struct *mm = current->mm;
99216 struct vm_area_struct *vma;
99217 struct vm_unmapped_area_info info;
99218+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
99219
99220 if (len > TASK_SIZE - mmap_min_addr)
99221 return -ENOMEM;
99222@@ -1916,11 +2183,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
99223 if (flags & MAP_FIXED)
99224 return addr;
99225
99226+#ifdef CONFIG_PAX_RANDMMAP
99227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
99228+#endif
99229+
99230 if (addr) {
99231 addr = PAGE_ALIGN(addr);
99232 vma = find_vma(mm, addr);
99233 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
99234- (!vma || addr + len <= vma->vm_start))
99235+ check_heap_stack_gap(vma, addr, len, offset))
99236 return addr;
99237 }
99238
99239@@ -1929,6 +2200,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
99240 info.low_limit = mm->mmap_base;
99241 info.high_limit = TASK_SIZE;
99242 info.align_mask = 0;
99243+ info.threadstack_offset = offset;
99244 return vm_unmapped_area(&info);
99245 }
99246 #endif
99247@@ -1947,6 +2219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
99248 struct mm_struct *mm = current->mm;
99249 unsigned long addr = addr0;
99250 struct vm_unmapped_area_info info;
99251+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
99252
99253 /* requested length too big for entire address space */
99254 if (len > TASK_SIZE - mmap_min_addr)
99255@@ -1955,12 +2228,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
99256 if (flags & MAP_FIXED)
99257 return addr;
99258
99259+#ifdef CONFIG_PAX_RANDMMAP
99260+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
99261+#endif
99262+
99263 /* requesting a specific address */
99264 if (addr) {
99265 addr = PAGE_ALIGN(addr);
99266 vma = find_vma(mm, addr);
99267 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
99268- (!vma || addr + len <= vma->vm_start))
99269+ check_heap_stack_gap(vma, addr, len, offset))
99270 return addr;
99271 }
99272
99273@@ -1969,6 +2246,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
99274 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
99275 info.high_limit = mm->mmap_base;
99276 info.align_mask = 0;
99277+ info.threadstack_offset = offset;
99278 addr = vm_unmapped_area(&info);
99279
99280 /*
99281@@ -1981,6 +2259,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
99282 VM_BUG_ON(addr != -ENOMEM);
99283 info.flags = 0;
99284 info.low_limit = TASK_UNMAPPED_BASE;
99285+
99286+#ifdef CONFIG_PAX_RANDMMAP
99287+ if (mm->pax_flags & MF_PAX_RANDMMAP)
99288+ info.low_limit += mm->delta_mmap;
99289+#endif
99290+
99291 info.high_limit = TASK_SIZE;
99292 addr = vm_unmapped_area(&info);
99293 }
99294@@ -2081,6 +2365,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
99295 return vma;
99296 }
99297
99298+#ifdef CONFIG_PAX_SEGMEXEC
99299+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
99300+{
99301+ struct vm_area_struct *vma_m;
99302+
99303+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
99304+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
99305+ BUG_ON(vma->vm_mirror);
99306+ return NULL;
99307+ }
99308+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
99309+ vma_m = vma->vm_mirror;
99310+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
99311+ BUG_ON(vma->vm_file != vma_m->vm_file);
99312+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
99313+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
99314+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
99315+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
99316+ return vma_m;
99317+}
99318+#endif
99319+
99320 /*
99321 * Verify that the stack growth is acceptable and
99322 * update accounting. This is shared with both the
99323@@ -2098,8 +2404,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
99324
99325 /* Stack limit test */
99326 actual_size = size;
99327- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
99328- actual_size -= PAGE_SIZE;
99329+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
99330 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
99331 return -ENOMEM;
99332
99333@@ -2110,6 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
99334 locked = mm->locked_vm + grow;
99335 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
99336 limit >>= PAGE_SHIFT;
99337+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
99338 if (locked > limit && !capable(CAP_IPC_LOCK))
99339 return -ENOMEM;
99340 }
99341@@ -2139,37 +2445,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
99342 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
99343 * vma is the last one with address > vma->vm_end. Have to extend vma.
99344 */
99345+#ifndef CONFIG_IA64
99346+static
99347+#endif
99348 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
99349 {
99350 int error;
99351+ bool locknext;
99352
99353 if (!(vma->vm_flags & VM_GROWSUP))
99354 return -EFAULT;
99355
99356+ /* Also guard against wrapping around to address 0. */
99357+ if (address < PAGE_ALIGN(address+1))
99358+ address = PAGE_ALIGN(address+1);
99359+ else
99360+ return -ENOMEM;
99361+
99362 /*
99363 * We must make sure the anon_vma is allocated
99364 * so that the anon_vma locking is not a noop.
99365 */
99366 if (unlikely(anon_vma_prepare(vma)))
99367 return -ENOMEM;
99368+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
99369+ if (locknext && anon_vma_prepare(vma->vm_next))
99370+ return -ENOMEM;
99371 vma_lock_anon_vma(vma);
99372+ if (locknext)
99373+ vma_lock_anon_vma(vma->vm_next);
99374
99375 /*
99376 * vma->vm_start/vm_end cannot change under us because the caller
99377 * is required to hold the mmap_sem in read mode. We need the
99378- * anon_vma lock to serialize against concurrent expand_stacks.
99379- * Also guard against wrapping around to address 0.
99380+ * anon_vma locks to serialize against concurrent expand_stacks
99381+ * and expand_upwards.
99382 */
99383- if (address < PAGE_ALIGN(address+4))
99384- address = PAGE_ALIGN(address+4);
99385- else {
99386- vma_unlock_anon_vma(vma);
99387- return -ENOMEM;
99388- }
99389 error = 0;
99390
99391 /* Somebody else might have raced and expanded it already */
99392- if (address > vma->vm_end) {
99393+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
99394+ error = -ENOMEM;
99395+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
99396 unsigned long size, grow;
99397
99398 size = address - vma->vm_start;
99399@@ -2204,6 +2521,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
99400 }
99401 }
99402 }
99403+ if (locknext)
99404+ vma_unlock_anon_vma(vma->vm_next);
99405 vma_unlock_anon_vma(vma);
99406 khugepaged_enter_vma_merge(vma, vma->vm_flags);
99407 validate_mm(vma->vm_mm);
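
The relocated guard in expand_upwards() depends on unsigned wrap-around: PAGE_ALIGN(address + 1) yields 0 once address sits in the topmost page, so growth past the end of the address space fails with -ENOMEM before any anon_vma lock is taken. A small demonstration of the wrap, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long ok  = 0x7fffffff0000UL;
    unsigned long top = ~0UL;  /* address in the last page: alignment wraps */
    printf("%#lx\n", PAGE_ALIGN(ok + 1));  /* rounds up normally */
    printf("%#lx\n", PAGE_ALIGN(top + 1)); /* 0: address < PAGE_ALIGN(address+1) fails */
    return 0;
}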
99408@@ -2218,6 +2537,8 @@ int expand_downwards(struct vm_area_struct *vma,
99409 unsigned long address)
99410 {
99411 int error;
99412+ bool lockprev = false;
99413+ struct vm_area_struct *prev;
99414
99415 /*
99416 * We must make sure the anon_vma is allocated
99417@@ -2231,6 +2552,15 @@ int expand_downwards(struct vm_area_struct *vma,
99418 if (error)
99419 return error;
99420
99421+ prev = vma->vm_prev;
99422+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
99423+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
99424+#endif
99425+ if (lockprev && anon_vma_prepare(prev))
99426+ return -ENOMEM;
99427+ if (lockprev)
99428+ vma_lock_anon_vma(prev);
99429+
99430 vma_lock_anon_vma(vma);
99431
99432 /*
99433@@ -2240,9 +2570,17 @@ int expand_downwards(struct vm_area_struct *vma,
99434 */
99435
99436 /* Somebody else might have raced and expanded it already */
99437- if (address < vma->vm_start) {
99438+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
99439+ error = -ENOMEM;
99440+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
99441 unsigned long size, grow;
99442
99443+#ifdef CONFIG_PAX_SEGMEXEC
99444+ struct vm_area_struct *vma_m;
99445+
99446+ vma_m = pax_find_mirror_vma(vma);
99447+#endif
99448+
99449 size = vma->vm_end - address;
99450 grow = (vma->vm_start - address) >> PAGE_SHIFT;
99451
99452@@ -2267,13 +2605,27 @@ int expand_downwards(struct vm_area_struct *vma,
99453 vma->vm_pgoff -= grow;
99454 anon_vma_interval_tree_post_update_vma(vma);
99455 vma_gap_update(vma);
99456+
99457+#ifdef CONFIG_PAX_SEGMEXEC
99458+ if (vma_m) {
99459+ anon_vma_interval_tree_pre_update_vma(vma_m);
99460+ vma_m->vm_start -= grow << PAGE_SHIFT;
99461+ vma_m->vm_pgoff -= grow;
99462+ anon_vma_interval_tree_post_update_vma(vma_m);
99463+ vma_gap_update(vma_m);
99464+ }
99465+#endif
99466+
99467 spin_unlock(&vma->vm_mm->page_table_lock);
99468
99469+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
99470 perf_event_mmap(vma);
99471 }
99472 }
99473 }
99474 vma_unlock_anon_vma(vma);
99475+ if (lockprev)
99476+ vma_unlock_anon_vma(prev);
99477 khugepaged_enter_vma_merge(vma, vma->vm_flags);
99478 validate_mm(vma->vm_mm);
99479 return error;
99480@@ -2373,6 +2725,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
99481 do {
99482 long nrpages = vma_pages(vma);
99483
99484+#ifdef CONFIG_PAX_SEGMEXEC
99485+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
99486+ vma = remove_vma(vma);
99487+ continue;
99488+ }
99489+#endif
99490+
99491 if (vma->vm_flags & VM_ACCOUNT)
99492 nr_accounted += nrpages;
99493 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
99494@@ -2417,6 +2776,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
99495 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
99496 vma->vm_prev = NULL;
99497 do {
99498+
99499+#ifdef CONFIG_PAX_SEGMEXEC
99500+ if (vma->vm_mirror) {
99501+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
99502+ vma->vm_mirror->vm_mirror = NULL;
99503+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
99504+ vma->vm_mirror = NULL;
99505+ }
99506+#endif
99507+
99508 vma_rb_erase(vma, &mm->mm_rb);
99509 mm->map_count--;
99510 tail_vma = vma;
99511@@ -2444,14 +2813,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99512 struct vm_area_struct *new;
99513 int err = -ENOMEM;
99514
99515+#ifdef CONFIG_PAX_SEGMEXEC
99516+ struct vm_area_struct *vma_m, *new_m = NULL;
99517+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
99518+#endif
99519+
99520 if (is_vm_hugetlb_page(vma) && (addr &
99521 ~(huge_page_mask(hstate_vma(vma)))))
99522 return -EINVAL;
99523
99524+#ifdef CONFIG_PAX_SEGMEXEC
99525+ vma_m = pax_find_mirror_vma(vma);
99526+#endif
99527+
99528 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
99529 if (!new)
99530 goto out_err;
99531
99532+#ifdef CONFIG_PAX_SEGMEXEC
99533+ if (vma_m) {
99534+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
99535+ if (!new_m) {
99536+ kmem_cache_free(vm_area_cachep, new);
99537+ goto out_err;
99538+ }
99539+ }
99540+#endif
99541+
99542 /* most fields are the same, copy all, and then fixup */
99543 *new = *vma;
99544
99545@@ -2464,6 +2852,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99546 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
99547 }
99548
99549+#ifdef CONFIG_PAX_SEGMEXEC
99550+ if (vma_m) {
99551+ *new_m = *vma_m;
99552+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
99553+ new_m->vm_mirror = new;
99554+ new->vm_mirror = new_m;
99555+
99556+ if (new_below)
99557+ new_m->vm_end = addr_m;
99558+ else {
99559+ new_m->vm_start = addr_m;
99560+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
99561+ }
99562+ }
99563+#endif
99564+
99565 err = vma_dup_policy(vma, new);
99566 if (err)
99567 goto out_free_vma;
99568@@ -2484,6 +2888,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99569 else
99570 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
99571
99572+#ifdef CONFIG_PAX_SEGMEXEC
99573+ if (!err && vma_m) {
99574+ struct mempolicy *pol = vma_policy(new);
99575+
99576+ if (anon_vma_clone(new_m, vma_m))
99577+ goto out_free_mpol;
99578+
99579+ mpol_get(pol);
99580+ set_vma_policy(new_m, pol);
99581+
99582+ if (new_m->vm_file)
99583+ get_file(new_m->vm_file);
99584+
99585+ if (new_m->vm_ops && new_m->vm_ops->open)
99586+ new_m->vm_ops->open(new_m);
99587+
99588+ if (new_below)
99589+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
99590+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
99591+ else
99592+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
99593+
99594+ if (err) {
99595+ if (new_m->vm_ops && new_m->vm_ops->close)
99596+ new_m->vm_ops->close(new_m);
99597+ if (new_m->vm_file)
99598+ fput(new_m->vm_file);
99599+ mpol_put(pol);
99600+ }
99601+ }
99602+#endif
99603+
99604 /* Success. */
99605 if (!err)
99606 return 0;
99607@@ -2493,10 +2929,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99608 new->vm_ops->close(new);
99609 if (new->vm_file)
99610 fput(new->vm_file);
99611- unlink_anon_vmas(new);
99612 out_free_mpol:
99613 mpol_put(vma_policy(new));
99614 out_free_vma:
99615+
99616+#ifdef CONFIG_PAX_SEGMEXEC
99617+ if (new_m) {
99618+ unlink_anon_vmas(new_m);
99619+ kmem_cache_free(vm_area_cachep, new_m);
99620+ }
99621+#endif
99622+
99623+ unlink_anon_vmas(new);
99624 kmem_cache_free(vm_area_cachep, new);
99625 out_err:
99626 return err;
99627@@ -2509,6 +2953,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99628 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99629 unsigned long addr, int new_below)
99630 {
99631+
99632+#ifdef CONFIG_PAX_SEGMEXEC
99633+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
99634+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
99635+ if (mm->map_count >= sysctl_max_map_count-1)
99636+ return -ENOMEM;
99637+ } else
99638+#endif
99639+
99640 if (mm->map_count >= sysctl_max_map_count)
99641 return -ENOMEM;
99642
99643@@ -2520,11 +2973,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99644 * work. This now handles partial unmappings.
99645 * Jeremy Fitzhardinge <jeremy@goop.org>
99646 */
99647+#ifdef CONFIG_PAX_SEGMEXEC
99648 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
99649 {
99650+ int ret = __do_munmap(mm, start, len);
99651+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
99652+ return ret;
99653+
99654+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
99655+}
99656+
99657+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
99658+#else
99659+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
99660+#endif
99661+{
99662 unsigned long end;
99663 struct vm_area_struct *vma, *prev, *last;
99664
99665+ /*
99666+ * mm->mmap_sem is required to protect against another thread
99667+ * changing the mappings in case we sleep.
99668+ */
99669+ verify_mm_writelocked(mm);
99670+
99671 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
99672 return -EINVAL;
99673
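
Under SEGMEXEC the user address space is split into two halves and executable mappings are mirrored into the upper half, so do_munmap() becomes a wrapper that unmaps the requested range and then the same range shifted up by SEGMEXEC_TASK_SIZE. A sketch of the address arithmetic; the 1.5 GiB constant is only illustrative of the usual i386 3 GiB split:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE (3UL * 1024 * 1024 * 1024 / 2)  /* illustrative */

int main(void)
{
    unsigned long start = 0x08048000UL, len = 0x2000UL;
    printf("unmap data copy:   %#lx..%#lx\n", start, start + len);
    printf("unmap exec mirror: %#lx..%#lx\n",
           start + SEGMEXEC_TASK_SIZE, start + SEGMEXEC_TASK_SIZE + len);
    return 0;
}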
99674@@ -2602,6 +3074,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
99675 /* Fix up all other VM information */
99676 remove_vma_list(mm, vma);
99677
99678+ track_exec_limit(mm, start, end, 0UL);
99679+
99680 return 0;
99681 }
99682
99683@@ -2610,6 +3084,13 @@ int vm_munmap(unsigned long start, size_t len)
99684 int ret;
99685 struct mm_struct *mm = current->mm;
99686
99687+
99688+#ifdef CONFIG_PAX_SEGMEXEC
99689+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
99690+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
99691+ return -EINVAL;
99692+#endif
99693+
99694 down_write(&mm->mmap_sem);
99695 ret = do_munmap(mm, start, len);
99696 up_write(&mm->mmap_sem);
99697@@ -2656,6 +3137,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
99698 down_write(&mm->mmap_sem);
99699 vma = find_vma(mm, start);
99700
99701+#ifdef CONFIG_PAX_SEGMEXEC
99702+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
99703+ goto out;
99704+#endif
99705+
99706 if (!vma || !(vma->vm_flags & VM_SHARED))
99707 goto out;
99708
99709@@ -2692,16 +3178,6 @@ out:
99710 return ret;
99711 }
99712
99713-static inline void verify_mm_writelocked(struct mm_struct *mm)
99714-{
99715-#ifdef CONFIG_DEBUG_VM
99716- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
99717- WARN_ON(1);
99718- up_read(&mm->mmap_sem);
99719- }
99720-#endif
99721-}
99722-
99723 /*
99724 * this is really a simplified "do_mmap". it only handles
99725 * anonymous maps. eventually we may be able to do some
99726@@ -2715,6 +3191,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99727 struct rb_node **rb_link, *rb_parent;
99728 pgoff_t pgoff = addr >> PAGE_SHIFT;
99729 int error;
99730+ unsigned long charged;
99731
99732 len = PAGE_ALIGN(len);
99733 if (!len)
99734@@ -2722,10 +3199,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99735
99736 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
99737
99738+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
99739+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
99740+ flags &= ~VM_EXEC;
99741+
99742+#ifdef CONFIG_PAX_MPROTECT
99743+ if (mm->pax_flags & MF_PAX_MPROTECT)
99744+ flags &= ~VM_MAYEXEC;
99745+#endif
99746+
99747+ }
99748+#endif
99749+
99750 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
99751 if (error & ~PAGE_MASK)
99752 return error;
99753
99754+ charged = len >> PAGE_SHIFT;
99755+
99756 error = mlock_future_check(mm, mm->def_flags, len);
99757 if (error)
99758 return error;
99759@@ -2739,21 +3230,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99760 /*
99761 * Clear old maps. this also does some error checking for us
99762 */
99763- munmap_back:
99764 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
99765 if (do_munmap(mm, addr, len))
99766 return -ENOMEM;
99767- goto munmap_back;
99768+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
99769 }
99770
99771 /* Check against address space limits *after* clearing old maps... */
99772- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
99773+ if (!may_expand_vm(mm, charged))
99774 return -ENOMEM;
99775
99776 if (mm->map_count > sysctl_max_map_count)
99777 return -ENOMEM;
99778
99779- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
99780+ if (security_vm_enough_memory_mm(mm, charged))
99781 return -ENOMEM;
99782
99783 /* Can we just expand an old private anonymous mapping? */
99784@@ -2767,7 +3257,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99785 */
99786 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
99787 if (!vma) {
99788- vm_unacct_memory(len >> PAGE_SHIFT);
99789+ vm_unacct_memory(charged);
99790 return -ENOMEM;
99791 }
99792
99793@@ -2781,10 +3271,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
99794 vma_link(mm, vma, prev, rb_link, rb_parent);
99795 out:
99796 perf_event_mmap(vma);
99797- mm->total_vm += len >> PAGE_SHIFT;
99798+ mm->total_vm += charged;
99799 if (flags & VM_LOCKED)
99800- mm->locked_vm += (len >> PAGE_SHIFT);
99801+ mm->locked_vm += charged;
99802 vma->vm_flags |= VM_SOFTDIRTY;
99803+ track_exec_limit(mm, addr, addr + len, flags);
99804 return addr;
99805 }
99806
99807@@ -2846,6 +3337,7 @@ void exit_mmap(struct mm_struct *mm)
99808 while (vma) {
99809 if (vma->vm_flags & VM_ACCOUNT)
99810 nr_accounted += vma_pages(vma);
99811+ vma->vm_mirror = NULL;
99812 vma = remove_vma(vma);
99813 }
99814 vm_unacct_memory(nr_accounted);
99815@@ -2860,6 +3352,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
99816 struct vm_area_struct *prev;
99817 struct rb_node **rb_link, *rb_parent;
99818
99819+#ifdef CONFIG_PAX_SEGMEXEC
99820+ struct vm_area_struct *vma_m = NULL;
99821+#endif
99822+
99823+ if (security_mmap_addr(vma->vm_start))
99824+ return -EPERM;
99825+
99826 /*
99827 * The vm_pgoff of a purely anonymous vma should be irrelevant
99828 * until its first write fault, when page's anon_vma and index
99829@@ -2883,7 +3382,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
99830 security_vm_enough_memory_mm(mm, vma_pages(vma)))
99831 return -ENOMEM;
99832
99833+#ifdef CONFIG_PAX_SEGMEXEC
99834+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
99835+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
99836+ if (!vma_m)
99837+ return -ENOMEM;
99838+ }
99839+#endif
99840+
99841 vma_link(mm, vma, prev, rb_link, rb_parent);
99842+
99843+#ifdef CONFIG_PAX_SEGMEXEC
99844+ if (vma_m)
99845+ BUG_ON(pax_mirror_vma(vma_m, vma));
99846+#endif
99847+
99848 return 0;
99849 }
99850
99851@@ -2902,6 +3415,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
99852 struct rb_node **rb_link, *rb_parent;
99853 bool faulted_in_anon_vma = true;
99854
99855+ BUG_ON(vma->vm_mirror);
99856+
99857 /*
99858 * If anonymous vma has not yet been faulted, update new pgoff
99859 * to match new location, to increase its chance of merging.
99860@@ -2966,6 +3481,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
99861 return NULL;
99862 }
99863
99864+#ifdef CONFIG_PAX_SEGMEXEC
99865+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
99866+{
99867+ struct vm_area_struct *prev_m;
99868+ struct rb_node **rb_link_m, *rb_parent_m;
99869+ struct mempolicy *pol_m;
99870+
99871+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
99872+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
99873+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
99874+ *vma_m = *vma;
99875+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
99876+ if (anon_vma_clone(vma_m, vma))
99877+ return -ENOMEM;
99878+ pol_m = vma_policy(vma_m);
99879+ mpol_get(pol_m);
99880+ set_vma_policy(vma_m, pol_m);
99881+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
99882+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
99883+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
99884+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
99885+ if (vma_m->vm_file)
99886+ get_file(vma_m->vm_file);
99887+ if (vma_m->vm_ops && vma_m->vm_ops->open)
99888+ vma_m->vm_ops->open(vma_m);
99889+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
99890+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
99891+ vma_m->vm_mirror = vma;
99892+ vma->vm_mirror = vma_m;
99893+ return 0;
99894+}
99895+#endif
99896+
99897 /*
99898 * Return true if the calling process may expand its vm space by the passed
99899 * number of pages
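
pax_mirror_vma() above duplicates the VMA, shifts it up by SEGMEXEC_TASK_SIZE, clears the write-side bits so the mirror is the execute-only view, and cross-links the pair through vm_mirror. A much-simplified stand-in model (the real function also clones anon_vma chains, takes file and policy references, and links the mirror into the rbtree; the constant and flag values are illustrative):

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL  /* illustrative: 1.5 GiB */
#define VM_WRITE    0x2u
#define VM_EXEC     0x4u
#define VM_MAYWRITE 0x20u

struct vma {
    unsigned long start, end, flags;
    struct vma *mirror;
};

static void mirror_vma(struct vma *m, struct vma *v)
{
    *m = *v;
    m->start += SEGMEXEC_TASK_SIZE;
    m->end   += SEGMEXEC_TASK_SIZE;
    m->flags &= ~(VM_WRITE | VM_MAYWRITE);  /* upper half is never writable */
    m->mirror = v;
    v->mirror = m;
}

int main(void)
{
    struct vma text = { 0x08048000UL, 0x08050000UL,
                        VM_EXEC | VM_WRITE | VM_MAYWRITE, NULL };
    struct vma m;
    mirror_vma(&m, &text);
    printf("%#lx-%#lx flags=%#lx\n", m.start, m.end, m.flags); /* exec bit kept */
    return 0;
}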
99900@@ -2977,6 +3525,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
99901
99902 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
99903
99904+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
99905 if (cur + npages > lim)
99906 return 0;
99907 return 1;
99908@@ -3059,6 +3608,22 @@ static struct vm_area_struct *__install_special_mapping(
99909 vma->vm_start = addr;
99910 vma->vm_end = addr + len;
99911
99912+#ifdef CONFIG_PAX_MPROTECT
99913+ if (mm->pax_flags & MF_PAX_MPROTECT) {
99914+#ifndef CONFIG_PAX_MPROTECT_COMPAT
99915+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
99916+ return ERR_PTR(-EPERM);
99917+ if (!(vm_flags & VM_EXEC))
99918+ vm_flags &= ~VM_MAYEXEC;
99919+#else
99920+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
99921+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
99922+#endif
99923+ else
99924+ vm_flags &= ~VM_MAYWRITE;
99925+ }
99926+#endif
99927+
99928 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
99929 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
99930
99931diff --git a/mm/mprotect.c b/mm/mprotect.c
99932index 8858483..8145fa5 100644
99933--- a/mm/mprotect.c
99934+++ b/mm/mprotect.c
99935@@ -24,10 +24,18 @@
99936 #include <linux/migrate.h>
99937 #include <linux/perf_event.h>
99938 #include <linux/ksm.h>
99939+#include <linux/sched/sysctl.h>
99940+
99941+#ifdef CONFIG_PAX_MPROTECT
99942+#include <linux/elf.h>
99943+#include <linux/binfmts.h>
99944+#endif
99945+
99946 #include <asm/uaccess.h>
99947 #include <asm/pgtable.h>
99948 #include <asm/cacheflush.h>
99949 #include <asm/tlbflush.h>
99950+#include <asm/mmu_context.h>
99951
99952 /*
99953 * For a prot_numa update we only hold mmap_sem for read so there is a
99954@@ -252,6 +260,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
99955 return pages;
99956 }
99957
99958+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
99959+/* called while holding the mmap semaphore for writing, except for stack expansion */
99960+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
99961+{
99962+ unsigned long oldlimit, newlimit = 0UL;
99963+
99964+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
99965+ return;
99966+
99967+ spin_lock(&mm->page_table_lock);
99968+ oldlimit = mm->context.user_cs_limit;
99969+ if ((prot & VM_EXEC) && oldlimit < end)
99970+ /* USER_CS limit moved up */
99971+ newlimit = end;
99972+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
99973+ /* USER_CS limit moved down */
99974+ newlimit = start;
99975+
99976+ if (newlimit) {
99977+ mm->context.user_cs_limit = newlimit;
99978+
99979+#ifdef CONFIG_SMP
99980+ wmb();
99981+ cpus_clear(mm->context.cpu_user_cs_mask);
99982+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
99983+#endif
99984+
99985+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
99986+ }
99987+ spin_unlock(&mm->page_table_lock);
99988+ if (newlimit == end) {
99989+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
99990+
99991+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
99992+ if (is_vm_hugetlb_page(vma))
99993+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
99994+ else
99995+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
99996+ }
99997+}
99998+#endif
99999+
100000 int
100001 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
100002 unsigned long start, unsigned long end, unsigned long newflags)
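
On x86-32 without hardware NX, CONFIG_ARCH_TRACK_EXEC_LIMIT emulates non-executable pages by clamping the USER_CS segment limit to the highest executable address; track_exec_limit() raises the limit when an executable range extends beyond it and lowers it when the range holding the old limit loses exec. The update rule in isolation, as a sketch (new_cs_limit() is an invented name):

#include <stdbool.h>
#include <stdio.h>

/* Returns the new code-segment limit, or 0 if it is unchanged. */
static unsigned long new_cs_limit(unsigned long oldlimit,
                                  unsigned long start, unsigned long end,
                                  bool exec)
{
    if (exec && oldlimit < end)
        return end;     /* limit moves up to cover a new executable range */
    if (!exec && start < oldlimit && oldlimit <= end)
        return start;   /* limit falls back when the covering range loses exec */
    return 0;
}

int main(void)
{
    printf("%#lx\n", new_cs_limit(0x08050000UL, 0x08050000UL, 0x08060000UL, true));
    printf("%#lx\n", new_cs_limit(0x08060000UL, 0x08050000UL, 0x08060000UL, false));
    return 0;
}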
100003@@ -264,11 +314,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
100004 int error;
100005 int dirty_accountable = 0;
100006
100007+#ifdef CONFIG_PAX_SEGMEXEC
100008+ struct vm_area_struct *vma_m = NULL;
100009+ unsigned long start_m, end_m;
100010+
100011+ start_m = start + SEGMEXEC_TASK_SIZE;
100012+ end_m = end + SEGMEXEC_TASK_SIZE;
100013+#endif
100014+
100015 if (newflags == oldflags) {
100016 *pprev = vma;
100017 return 0;
100018 }
100019
100020+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
100021+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
100022+
100023+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
100024+ return -ENOMEM;
100025+
100026+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
100027+ return -ENOMEM;
100028+ }
100029+
100030 /*
100031 * If we make a private mapping writable we increase our commit;
100032 * but (without finer accounting) cannot reduce our commit if we
100033@@ -285,6 +353,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
100034 }
100035 }
100036
100037+#ifdef CONFIG_PAX_SEGMEXEC
100038+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
100039+ if (start != vma->vm_start) {
100040+ error = split_vma(mm, vma, start, 1);
100041+ if (error)
100042+ goto fail;
100043+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
100044+ *pprev = (*pprev)->vm_next;
100045+ }
100046+
100047+ if (end != vma->vm_end) {
100048+ error = split_vma(mm, vma, end, 0);
100049+ if (error)
100050+ goto fail;
100051+ }
100052+
100053+ if (pax_find_mirror_vma(vma)) {
100054+ error = __do_munmap(mm, start_m, end_m - start_m);
100055+ if (error)
100056+ goto fail;
100057+ } else {
100058+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
100059+ if (!vma_m) {
100060+ error = -ENOMEM;
100061+ goto fail;
100062+ }
100063+ vma->vm_flags = newflags;
100064+ error = pax_mirror_vma(vma_m, vma);
100065+ if (error) {
100066+ vma->vm_flags = oldflags;
100067+ goto fail;
100068+ }
100069+ }
100070+ }
100071+#endif
100072+
100073 /*
100074 * First try to merge with previous and/or next vma.
100075 */
100076@@ -315,7 +419,19 @@ success:
100077 * vm_flags and vm_page_prot are protected by the mmap_sem
100078 * held in write mode.
100079 */
100080+
100081+#ifdef CONFIG_PAX_SEGMEXEC
100082+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
100083+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
100084+#endif
100085+
100086 vma->vm_flags = newflags;
100087+
100088+#ifdef CONFIG_PAX_MPROTECT
100089+ if (mm->binfmt && mm->binfmt->handle_mprotect)
100090+ mm->binfmt->handle_mprotect(vma, newflags);
100091+#endif
100092+
100093 dirty_accountable = vma_wants_writenotify(vma);
100094 vma_set_page_prot(vma);
100095
100096@@ -351,6 +467,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100097 end = start + len;
100098 if (end <= start)
100099 return -ENOMEM;
100100+
100101+#ifdef CONFIG_PAX_SEGMEXEC
100102+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
100103+ if (end > SEGMEXEC_TASK_SIZE)
100104+ return -EINVAL;
100105+ } else
100106+#endif
100107+
100108+ if (end > TASK_SIZE)
100109+ return -EINVAL;
100110+
100111 if (!arch_validate_prot(prot))
100112 return -EINVAL;
100113
100114@@ -358,7 +485,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100115 /*
100116 * Does the application expect PROT_READ to imply PROT_EXEC:
100117 */
100118- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
100119+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
100120 prot |= PROT_EXEC;
100121
100122 vm_flags = calc_vm_prot_bits(prot);
100123@@ -390,6 +517,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100124 if (start > vma->vm_start)
100125 prev = vma;
100126
100127+#ifdef CONFIG_PAX_MPROTECT
100128+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
100129+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
100130+#endif
100131+
100132 for (nstart = start ; ; ) {
100133 unsigned long newflags;
100134
100135@@ -400,6 +532,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100136
100137 /* newflags >> 4 shift VM_MAY% in place of VM_% */
100138 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
100139+ if (prot & (PROT_WRITE | PROT_EXEC))
100140+ gr_log_rwxmprotect(vma);
100141+
100142+ error = -EACCES;
100143+ goto out;
100144+ }
100145+
100146+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
100147 error = -EACCES;
100148 goto out;
100149 }
100150@@ -414,6 +554,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
100151 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
100152 if (error)
100153 goto out;
100154+
100155+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
100156+
100157 nstart = tmp;
100158
100159 if (nstart < prev->vm_end)
100160diff --git a/mm/mremap.c b/mm/mremap.c
100161index 2dc44b1..caa1819 100644
100162--- a/mm/mremap.c
100163+++ b/mm/mremap.c
100164@@ -142,6 +142,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
100165 continue;
100166 pte = ptep_get_and_clear(mm, old_addr, old_pte);
100167 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
100168+
100169+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
100170+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
100171+ pte = pte_exprotect(pte);
100172+#endif
100173+
100174 pte = move_soft_dirty_pte(pte);
100175 set_pte_at(mm, new_addr, new_pte, pte);
100176 }
100177@@ -350,6 +356,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
100178 if (is_vm_hugetlb_page(vma))
100179 goto Einval;
100180
100181+#ifdef CONFIG_PAX_SEGMEXEC
100182+ if (pax_find_mirror_vma(vma))
100183+ goto Einval;
100184+#endif
100185+
100186 /* We can't remap across vm area boundaries */
100187 if (old_len > vma->vm_end - addr)
100188 goto Efault;
100189@@ -405,20 +416,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
100190 unsigned long ret = -EINVAL;
100191 unsigned long charged = 0;
100192 unsigned long map_flags;
100193+ unsigned long pax_task_size = TASK_SIZE;
100194
100195 if (new_addr & ~PAGE_MASK)
100196 goto out;
100197
100198- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
100199+#ifdef CONFIG_PAX_SEGMEXEC
100200+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
100201+ pax_task_size = SEGMEXEC_TASK_SIZE;
100202+#endif
100203+
100204+ pax_task_size -= PAGE_SIZE;
100205+
100206+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
100207 goto out;
100208
100209 /* Check if the location we're moving into overlaps the
100210 * old location at all, and fail if it does.
100211 */
100212- if ((new_addr <= addr) && (new_addr+new_len) > addr)
100213- goto out;
100214-
100215- if ((addr <= new_addr) && (addr+old_len) > new_addr)
100216+ if (addr + old_len > new_addr && new_addr + new_len > addr)
100217 goto out;
100218
100219 ret = do_munmap(mm, new_addr, new_len);
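
The two removed asymmetric tests collapse into the canonical half-open interval check: [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect iff each starts before the other ends. A standalone check of the equivalence:

#include <stdbool.h>
#include <stdio.h>

static bool overlaps(unsigned long a, unsigned long la,
                     unsigned long b, unsigned long lb)
{
    return a + la > b && b + lb > a;
}

int main(void)
{
    printf("%d\n", overlaps(0x1000, 0x1000, 0x1800, 0x1000)); /* 1: intersect */
    printf("%d\n", overlaps(0x1000, 0x1000, 0x2000, 0x1000)); /* 0: adjacent */
    return 0;
}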
100220@@ -487,6 +503,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
100221 unsigned long ret = -EINVAL;
100222 unsigned long charged = 0;
100223 bool locked = false;
100224+ unsigned long pax_task_size = TASK_SIZE;
100225
100226 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
100227 return ret;
100228@@ -508,6 +525,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
100229 if (!new_len)
100230 return ret;
100231
100232+#ifdef CONFIG_PAX_SEGMEXEC
100233+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
100234+ pax_task_size = SEGMEXEC_TASK_SIZE;
100235+#endif
100236+
100237+ pax_task_size -= PAGE_SIZE;
100238+
100239+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
100240+ old_len > pax_task_size || addr > pax_task_size-old_len)
100241+ return ret;
100242+
100243 down_write(&current->mm->mmap_sem);
100244
100245 if (flags & MREMAP_FIXED) {
100246@@ -558,6 +586,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
100247 new_addr = addr;
100248 }
100249 ret = addr;
100250+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
100251 goto out;
100252 }
100253 }
100254@@ -581,7 +610,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
100255 goto out;
100256 }
100257
100258+ map_flags = vma->vm_flags;
100259 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
100260+ if (!(ret & ~PAGE_MASK)) {
100261+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
100262+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
100263+ }
100264 }
100265 out:
100266 if (ret & ~PAGE_MASK)
100267diff --git a/mm/nommu.c b/mm/nommu.c
100268index 3fba2dc..fdad748 100644
100269--- a/mm/nommu.c
100270+++ b/mm/nommu.c
100271@@ -72,7 +72,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
100272 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
100273 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
100274 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
100275-int heap_stack_gap = 0;
100276
100277 atomic_long_t mmap_pages_allocated;
100278
100279@@ -892,15 +891,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
100280 EXPORT_SYMBOL(find_vma);
100281
100282 /*
100283- * find a VMA
100284- * - we don't extend stack VMAs under NOMMU conditions
100285- */
100286-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
100287-{
100288- return find_vma(mm, addr);
100289-}
100290-
100291-/*
100292 * expand a stack to a given address
100293 * - not supported under NOMMU conditions
100294 */
100295@@ -1585,6 +1575,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
100296
100297 /* most fields are the same, copy all, and then fixup */
100298 *new = *vma;
100299+ INIT_LIST_HEAD(&new->anon_vma_chain);
100300 *region = *vma->vm_region;
100301 new->vm_region = region;
100302
100303@@ -2007,8 +1998,8 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
100304 }
100305 EXPORT_SYMBOL(filemap_map_pages);
100306
100307-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100308- unsigned long addr, void *buf, int len, int write)
100309+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100310+ unsigned long addr, void *buf, size_t len, int write)
100311 {
100312 struct vm_area_struct *vma;
100313
100314@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
100315 *
100316 * The caller must hold a reference on @mm.
100317 */
100318-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100319- void *buf, int len, int write)
100320+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
100321+ void *buf, size_t len, int write)
100322 {
100323 return __access_remote_vm(NULL, mm, addr, buf, len, write);
100324 }
100325@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
100326 * Access another process' address space.
100327 * - source/target buffer must be kernel space
100328 */
100329-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
100330+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
100331 {
100332 struct mm_struct *mm;
100333
100334diff --git a/mm/page-writeback.c b/mm/page-writeback.c
100335index ad05f2f..cee723a 100644
100336--- a/mm/page-writeback.c
100337+++ b/mm/page-writeback.c
100338@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
100339 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
100340 * - the bdi dirty thresh drops quickly due to change of JBOD workload
100341 */
100342-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
100343+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
100344 unsigned long thresh,
100345 unsigned long bg_thresh,
100346 unsigned long dirty,
100347diff --git a/mm/page_alloc.c b/mm/page_alloc.c
100348index 40e2942..0eb29a2 100644
100349--- a/mm/page_alloc.c
100350+++ b/mm/page_alloc.c
100351@@ -61,6 +61,7 @@
100352 #include <linux/hugetlb.h>
100353 #include <linux/sched/rt.h>
100354 #include <linux/page_owner.h>
100355+#include <linux/random.h>
100356
100357 #include <asm/sections.h>
100358 #include <asm/tlbflush.h>
100359@@ -357,7 +358,7 @@ out:
100360 * This usage means that zero-order pages may not be compound.
100361 */
100362
100363-static void free_compound_page(struct page *page)
100364+void free_compound_page(struct page *page)
100365 {
100366 __free_pages_ok(page, compound_order(page));
100367 }
100368@@ -480,7 +481,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
100369 __mod_zone_freepage_state(zone, (1 << order), migratetype);
100370 }
100371 #else
100372-struct page_ext_operations debug_guardpage_ops = { NULL, };
100373+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
100374 static inline void set_page_guard(struct zone *zone, struct page *page,
100375 unsigned int order, int migratetype) {}
100376 static inline void clear_page_guard(struct zone *zone, struct page *page,
100377@@ -783,6 +784,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
100378 bool compound = PageCompound(page);
100379 int i, bad = 0;
100380
100381+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100382+ unsigned long index = 1UL << order;
100383+#endif
100384+
100385 VM_BUG_ON_PAGE(PageTail(page), page);
100386 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
100387
100388@@ -809,6 +814,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
100389 debug_check_no_obj_freed(page_address(page),
100390 PAGE_SIZE << order);
100391 }
100392+
100393+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100394+ for (; index; --index)
100395+ sanitize_highpage(page + index - 1);
100396+#endif
100397+
100398 arch_free_page(page, order);
100399 kernel_map_pages(page, 1 << order, 0);
100400
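
With PAX_MEMORY_SANITIZE, every page of an order-n block is scrubbed as it is freed, which is also why the __GFP_ZERO pre-zeroing in prep_new_page() is compiled out later in this file: freshly freed pages are already clean. A userspace model of the highest-index-first walk; sanitize_block() and plain memset() stand in for the kernel's sanitize_highpage():

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Model: scrub every page of an order-n block, highest index first. */
static void sanitize_block(unsigned char *pages, unsigned int order)
{
    unsigned long index = 1UL << order;
    for (; index; --index)
        memset(pages + (index - 1) * PAGE_SIZE, 0, PAGE_SIZE);
}

int main(void)
{
    static unsigned char block[4 * PAGE_SIZE];
    memset(block, 0xaa, sizeof block);
    sanitize_block(block, 2);             /* order-2 block = 4 pages */
    printf("%d\n", block[3 * PAGE_SIZE]); /* 0: last page scrubbed too */
    return 0;
}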
100401@@ -832,6 +843,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
100402 local_irq_restore(flags);
100403 }
100404
100405+#ifdef CONFIG_PAX_LATENT_ENTROPY
100406+bool __meminitdata extra_latent_entropy;
100407+
100408+static int __init setup_pax_extra_latent_entropy(char *str)
100409+{
100410+ extra_latent_entropy = true;
100411+ return 0;
100412+}
100413+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
100414+
100415+volatile u64 latent_entropy __latent_entropy;
100416+EXPORT_SYMBOL(latent_entropy);
100417+#endif
100418+
100419 void __init __free_pages_bootmem(struct page *page, unsigned int order)
100420 {
100421 unsigned int nr_pages = 1 << order;
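
With pax_extra_latent_entropy, the contents of low physical pages (pfn < 0x100000, i.e. below 4 GiB) released by the boot allocator are folded through a cheap combiner into latent_entropy and credited to the RNG via add_device_randomness(). The mixing step in isolation; mix_page() is an invented wrapper around the hunk's hash loop:

#include <stdint.h>
#include <stdio.h>

static uint64_t mix_page(uint64_t latent, const uint64_t *data, size_t words)
{
    uint64_t hash = 0;
    for (size_t i = 0; i < words; i++)
        hash ^= hash + data[i];   /* cheap, non-cryptographic combiner */
    return latent ^ hash;
}

int main(void)
{
    uint64_t page[512] = { 1, 2, 3 };   /* stand-in for one 4 KiB page */
    printf("%#llx\n", (unsigned long long)mix_page(0, page, 512));
    return 0;
}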
100422@@ -847,6 +872,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
100423 __ClearPageReserved(p);
100424 set_page_count(p, 0);
100425
100426+#ifdef CONFIG_PAX_LATENT_ENTROPY
100427+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
100428+ u64 hash = 0;
100429+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
100430+ const u64 *data = lowmem_page_address(page);
100431+
100432+ for (index = 0; index < end; index++)
100433+ hash ^= hash + data[index];
100434+ latent_entropy ^= hash;
100435+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
100436+ }
100437+#endif
100438+
100439 page_zone(page)->managed_pages += nr_pages;
100440 set_page_refcounted(page);
100441 __free_pages(page, order);
100442@@ -974,8 +1012,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
100443 kernel_map_pages(page, 1 << order, 1);
100444 kasan_alloc_pages(page, order);
100445
100446+#ifndef CONFIG_PAX_MEMORY_SANITIZE
100447 if (gfp_flags & __GFP_ZERO)
100448 prep_zero_page(page, order, gfp_flags);
100449+#endif
100450
100451 if (order && (gfp_flags & __GFP_COMP))
100452 prep_compound_page(page, order);
100453@@ -1699,7 +1739,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
100454 }
100455
100456 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
100457- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
100458+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
100459 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
100460 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
100461
100462@@ -2018,7 +2058,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
100463 do {
100464 mod_zone_page_state(zone, NR_ALLOC_BATCH,
100465 high_wmark_pages(zone) - low_wmark_pages(zone) -
100466- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
100467+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
100468 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
100469 } while (zone++ != preferred_zone);
100470 }
100471@@ -5738,7 +5778,7 @@ static void __setup_per_zone_wmarks(void)
100472
100473 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
100474 high_wmark_pages(zone) - low_wmark_pages(zone) -
100475- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
100476+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
100477
100478 setup_zone_migrate_reserve(zone);
100479 spin_unlock_irqrestore(&zone->lock, flags);
100480diff --git a/mm/percpu.c b/mm/percpu.c
100481index 73c97a5..508ee25 100644
100482--- a/mm/percpu.c
100483+++ b/mm/percpu.c
100484@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
100485 static unsigned int pcpu_high_unit_cpu __read_mostly;
100486
100487 /* the address of the first chunk which starts with the kernel static area */
100488-void *pcpu_base_addr __read_mostly;
100489+void *pcpu_base_addr __read_only;
100490 EXPORT_SYMBOL_GPL(pcpu_base_addr);
100491
100492 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
100493diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
100494index b159769..d07037f 100644
100495--- a/mm/process_vm_access.c
100496+++ b/mm/process_vm_access.c
100497@@ -13,6 +13,7 @@
100498 #include <linux/uio.h>
100499 #include <linux/sched.h>
100500 #include <linux/highmem.h>
100501+#include <linux/security.h>
100502 #include <linux/ptrace.h>
100503 #include <linux/slab.h>
100504 #include <linux/syscalls.h>
100505@@ -154,19 +155,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
100506 ssize_t iov_len;
100507 size_t total_len = iov_iter_count(iter);
100508
100509+ return -ENOSYS; // PaX: until properly audited
100510+
100511 /*
100512 * Work out how many pages of struct pages we're going to need
100513 * when eventually calling get_user_pages
100514 */
100515 for (i = 0; i < riovcnt; i++) {
100516 iov_len = rvec[i].iov_len;
100517- if (iov_len > 0) {
100518- nr_pages_iov = ((unsigned long)rvec[i].iov_base
100519- + iov_len)
100520- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
100521- / PAGE_SIZE + 1;
100522- nr_pages = max(nr_pages, nr_pages_iov);
100523- }
100524+ if (iov_len <= 0)
100525+ continue;
100526+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
100527+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
100528+ nr_pages = max(nr_pages, nr_pages_iov);
100529 }
100530
100531 if (nr_pages == 0)
100532@@ -194,6 +195,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
100533 goto free_proc_pages;
100534 }
100535
100536+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
100537+ rc = -EPERM;
100538+ goto put_task_struct;
100539+ }
100540+
100541 mm = mm_access(task, PTRACE_MODE_ATTACH);
100542 if (!mm || IS_ERR(mm)) {
100543 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
100544diff --git a/mm/rmap.c b/mm/rmap.c
100545index c161a14..8a069bb 100644
100546--- a/mm/rmap.c
100547+++ b/mm/rmap.c
100548@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
100549 struct anon_vma *anon_vma = vma->anon_vma;
100550 struct anon_vma_chain *avc;
100551
100552+#ifdef CONFIG_PAX_SEGMEXEC
100553+ struct anon_vma_chain *avc_m = NULL;
100554+#endif
100555+
100556 might_sleep();
100557 if (unlikely(!anon_vma)) {
100558 struct mm_struct *mm = vma->vm_mm;
100559@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
100560 if (!avc)
100561 goto out_enomem;
100562
100563+#ifdef CONFIG_PAX_SEGMEXEC
100564+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
100565+ if (!avc_m)
100566+ goto out_enomem_free_avc;
100567+#endif
100568+
100569 anon_vma = find_mergeable_anon_vma(vma);
100570 allocated = NULL;
100571 if (!anon_vma) {
100572@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
100573 /* page_table_lock to protect against threads */
100574 spin_lock(&mm->page_table_lock);
100575 if (likely(!vma->anon_vma)) {
100576+
100577+#ifdef CONFIG_PAX_SEGMEXEC
100578+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
100579+
100580+ if (vma_m) {
100581+ BUG_ON(vma_m->anon_vma);
100582+ vma_m->anon_vma = anon_vma;
100583+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
100584+ anon_vma->degree++;
100585+ avc_m = NULL;
100586+ }
100587+#endif
100588+
100589 vma->anon_vma = anon_vma;
100590 anon_vma_chain_link(vma, avc, anon_vma);
100591 /* vma reference or self-parent link for new root */
100592@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
100593
100594 if (unlikely(allocated))
100595 put_anon_vma(allocated);
100596+
100597+#ifdef CONFIG_PAX_SEGMEXEC
100598+ if (unlikely(avc_m))
100599+ anon_vma_chain_free(avc_m);
100600+#endif
100601+
100602 if (unlikely(avc))
100603 anon_vma_chain_free(avc);
100604 }
100605 return 0;
100606
100607 out_enomem_free_avc:
100608+
100609+#ifdef CONFIG_PAX_SEGMEXEC
100610+ if (avc_m)
100611+ anon_vma_chain_free(avc_m);
100612+#endif
100613+
100614 anon_vma_chain_free(avc);
100615 out_enomem:
100616 return -ENOMEM;
100617@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
100618 * good chance of avoiding scanning the whole hierarchy when it searches where
100619 * page is mapped.
100620 */
100621-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
100622+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
100623 {
100624 struct anon_vma_chain *avc, *pavc;
100625 struct anon_vma *root = NULL;
100626@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
100627 * the corresponding VMA in the parent process is attached to.
100628 * Returns 0 on success, non-zero on failure.
100629 */
100630-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
100631+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
100632 {
100633 struct anon_vma_chain *avc;
100634 struct anon_vma *anon_vma;
100635@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
100636 void __init anon_vma_init(void)
100637 {
100638 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
100639- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
100640- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
100641+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
100642+ anon_vma_ctor);
100643+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
100644+ SLAB_PANIC|SLAB_NO_SANITIZE);
100645 }
100646
100647 /*
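On the rmap.c hunks: under SEGMEXEC every executable mapping has a mirror VMA in the upper half of the split address space, and the mirror must share the original's anon_vma so reverse mapping sees both views of each page. Because page_table_lock is a spinlock, the extra anon_vma_chain for the mirror is allocated up front with GFP_KERNEL and discarded afterwards if no mirror exists. A user-space sketch of that preallocate-outside-the-lock pattern (a pthread mutex stands in for the spinlock, have_mirror for pax_find_mirror_vma()):

#include <pthread.h>
#include <stdlib.h>

struct chain { struct chain *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct chain *head;
static int have_mirror;		/* stand-in for pax_find_mirror_vma() != NULL */

static int prepare(void)
{
	struct chain *avc = malloc(sizeof(*avc));
	struct chain *avc_m = malloc(sizeof(*avc_m));	/* the spare */

	if (!avc || !avc_m) {
		free(avc);
		free(avc_m);
		return -1;
	}

	pthread_mutex_lock(&lock);	/* no sleeping allocation past here */
	avc->next = head;
	head = avc;
	if (have_mirror) {		/* mirror present: consume the spare */
		avc_m->next = head;
		head = avc_m;
		avc_m = NULL;
	}
	pthread_mutex_unlock(&lock);

	free(avc_m);			/* unused spare, if any */
	return 0;
}

int main(void)
{
	have_mirror = 1;
	return prepare();
}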
100648diff --git a/mm/shmem.c b/mm/shmem.c
100649index cf2d0ca..ec06b8b 100644
100650--- a/mm/shmem.c
100651+++ b/mm/shmem.c
100652@@ -33,7 +33,7 @@
100653 #include <linux/swap.h>
100654 #include <linux/aio.h>
100655
100656-static struct vfsmount *shm_mnt;
100657+struct vfsmount *shm_mnt;
100658
100659 #ifdef CONFIG_SHMEM
100660 /*
100661@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
100662 #define BOGO_DIRENT_SIZE 20
100663
100664 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
100665-#define SHORT_SYMLINK_LEN 128
100666+#define SHORT_SYMLINK_LEN 64
100667
100668 /*
100669 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
100670@@ -2555,6 +2555,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
100671 static int shmem_xattr_validate(const char *name)
100672 {
100673 struct { const char *prefix; size_t len; } arr[] = {
100674+
100675+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
100676+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
100677+#endif
100678+
100679 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
100680 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
100681 };
100682@@ -2610,6 +2615,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
100683 if (err)
100684 return err;
100685
100686+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
100687+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
100688+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
100689+ return -EOPNOTSUPP;
100690+ if (size > 8)
100691+ return -EINVAL;
100692+ }
100693+#endif
100694+
100695 return simple_xattr_set(&info->xattrs, name, value, size, flags);
100696 }
100697
100698@@ -2993,8 +3007,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
100699 int err = -ENOMEM;
100700
100701 /* Round up to L1_CACHE_BYTES to resist false sharing */
100702- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
100703- L1_CACHE_BYTES), GFP_KERNEL);
100704+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
100705 if (!sbinfo)
100706 return -ENOMEM;
100707
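With CONFIG_PAX_XATTR_PAX_FLAGS, tmpfs admits the user.* xattr namespace solely so that user.pax.flags can mark binaries stored on it; any other user.* name is refused and the flag value is capped at 8 bytes. A stand-alone rendering of the gate added to shmem_setxattr() (the constants mirror the patch; the harness and printed errno values, Linux ones, are illustrative):

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define XATTR_USER_PREFIX	"user."
#define XATTR_USER_PREFIX_LEN	(sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS	"user.pax.flags"

static int pax_xattr_gate(const char *name, size_t size)
{
	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
			return -EOPNOTSUPP;	/* other user.* refused */
		if (size > 8)
			return -EINVAL;		/* flag string is tiny */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", pax_xattr_gate("user.pax.flags", 4));	/* 0 */
	printf("%d\n", pax_xattr_gate("user.mime_type", 9));	/* -EOPNOTSUPP */
	printf("%d\n", pax_xattr_gate("user.pax.flags", 64));	/* -EINVAL */
	return 0;
}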
100708diff --git a/mm/slab.c b/mm/slab.c
100709index c4b89ea..20990be 100644
100710--- a/mm/slab.c
100711+++ b/mm/slab.c
100712@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
100713 if ((x)->max_freeable < i) \
100714 (x)->max_freeable = i; \
100715 } while (0)
100716-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
100717-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
100718-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
100719-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
100720+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
100721+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
100722+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
100723+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
100724+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
100725+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
100726 #else
100727 #define STATS_INC_ACTIVE(x) do { } while (0)
100728 #define STATS_DEC_ACTIVE(x) do { } while (0)
100729@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
100730 #define STATS_INC_ALLOCMISS(x) do { } while (0)
100731 #define STATS_INC_FREEHIT(x) do { } while (0)
100732 #define STATS_INC_FREEMISS(x) do { } while (0)
100733+#define STATS_INC_SANITIZED(x) do { } while (0)
100734+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
100735 #endif
100736
100737 #if DEBUG
100738@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
100739 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
100740 */
100741 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
100742- const struct page *page, void *obj)
100743+ const struct page *page, const void *obj)
100744 {
100745 u32 offset = (obj - page->s_mem);
100746 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
100747@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
100748 * structures first. Without this, further allocations will bug.
100749 */
100750 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
100751- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
100752+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
100753 slab_state = PARTIAL_NODE;
100754
100755 slab_early_init = 0;
100756@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100757
100758 cachep = find_mergeable(size, align, flags, name, ctor);
100759 if (cachep) {
100760- cachep->refcount++;
100761+ atomic_inc(&cachep->refcount);
100762
100763 /*
100764 * Adjust the object sizes so that we clear
100765@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
100766 struct array_cache *ac = cpu_cache_get(cachep);
100767
100768 check_irq_off();
100769+
100770+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100771+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
100772+ STATS_INC_NOT_SANITIZED(cachep);
100773+ else {
100774+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
100775+
100776+ if (cachep->ctor)
100777+ cachep->ctor(objp);
100778+
100779+ STATS_INC_SANITIZED(cachep);
100780+ }
100781+#endif
100782+
100783 kmemleak_free_recursive(objp, cachep->flags);
100784 objp = cache_free_debugcheck(cachep, objp, caller);
100785
100786@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
100787 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
100788 }
100789
100790-void *__kmalloc_node(size_t size, gfp_t flags, int node)
100791+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
100792 {
100793 return __do_kmalloc_node(size, flags, node, _RET_IP_);
100794 }
100795@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
100796 * @flags: the type of memory to allocate (see kmalloc).
100797 * @caller: function caller for debug tracking of the caller
100798 */
100799-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
100800+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
100801 unsigned long caller)
100802 {
100803 struct kmem_cache *cachep;
100804@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
100805
100806 if (unlikely(ZERO_OR_NULL_PTR(objp)))
100807 return;
100808+ VM_BUG_ON(!virt_addr_valid(objp));
100809 local_irq_save(flags);
100810 kfree_debugcheck(objp);
100811 c = virt_to_cache(objp);
100812@@ -3981,14 +4000,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
100813 }
100814 /* cpu stats */
100815 {
100816- unsigned long allochit = atomic_read(&cachep->allochit);
100817- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
100818- unsigned long freehit = atomic_read(&cachep->freehit);
100819- unsigned long freemiss = atomic_read(&cachep->freemiss);
100820+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
100821+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
100822+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
100823+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
100824
100825 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
100826 allochit, allocmiss, freehit, freemiss);
100827 }
100828+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100829+ {
100830+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
100831+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
100832+
100833+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
100834+ }
100835+#endif
100836 #endif
100837 }
100838
100839@@ -4196,13 +4223,69 @@ static const struct file_operations proc_slabstats_operations = {
100840 static int __init slab_proc_init(void)
100841 {
100842 #ifdef CONFIG_DEBUG_SLAB_LEAK
100843- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
100844+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
100845 #endif
100846 return 0;
100847 }
100848 module_init(slab_proc_init);
100849 #endif
100850
100851+bool is_usercopy_object(const void *ptr)
100852+{
100853+ struct page *page;
100854+ struct kmem_cache *cachep;
100855+
100856+ if (ZERO_OR_NULL_PTR(ptr))
100857+ return false;
100858+
100859+ if (!slab_is_available())
100860+ return false;
100861+
100862+ if (!virt_addr_valid(ptr))
100863+ return false;
100864+
100865+ page = virt_to_head_page(ptr);
100866+
100867+ if (!PageSlab(page))
100868+ return false;
100869+
100870+ cachep = page->slab_cache;
100871+ return cachep->flags & SLAB_USERCOPY;
100872+}
100873+
100874+#ifdef CONFIG_PAX_USERCOPY
100875+const char *check_heap_object(const void *ptr, unsigned long n)
100876+{
100877+ struct page *page;
100878+ struct kmem_cache *cachep;
100879+ unsigned int objnr;
100880+ unsigned long offset;
100881+
100882+ if (ZERO_OR_NULL_PTR(ptr))
100883+ return "<null>";
100884+
100885+ if (!virt_addr_valid(ptr))
100886+ return NULL;
100887+
100888+ page = virt_to_head_page(ptr);
100889+
100890+ if (!PageSlab(page))
100891+ return NULL;
100892+
100893+ cachep = page->slab_cache;
100894+ if (!(cachep->flags & SLAB_USERCOPY))
100895+ return cachep->name;
100896+
100897+ objnr = obj_to_index(cachep, page, ptr);
100898+ BUG_ON(objnr >= cachep->num);
100899+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
100900+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
100901+ return NULL;
100902+
100903+ return cachep->name;
100904+}
100905+#endif
100906+
100907 /**
100908 * ksize - get the actual amount of memory allocated for a given object
100909 * @objp: Pointer to the object
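The slab.c side collects several themes seen throughout the patch: the hit/miss statistics become *_unchecked atomics (wraparound is harmless for counters), the cache refcount becomes a real atomic_t, the kmalloc-node bootstrap cache gains SLAB_USERCOPY, /proc/slab_allocators tightens from world-readable to S_IRUSR, and __cache_free() gains the PAX_MEMORY_SANITIZE hook. The sanitize-on-free idea in user-space form (made-up cache struct; the poison value is the x86_64 one from the patch):

#include <stdio.h>
#include <string.h>

#define PAX_MEMORY_SANITIZE_VALUE 0xfe

struct fake_cache {
	size_t object_size;
	void (*ctor)(void *);
};

static void sanitize_free(struct fake_cache *c, void *obj)
{
	/* wipe stale contents the moment the object is freed */
	memset(obj, PAX_MEMORY_SANITIZE_VALUE, c->object_size);
	if (c->ctor)
		c->ctor(obj);	/* restore the constructed-state invariant */
}

static void list_ctor(void *p)
{
	memset(p, 0, sizeof(void *) * 2);	/* say: two list pointers */
}

int main(void)
{
	unsigned char obj[32] = "secret key material";
	struct fake_cache c = { sizeof(obj), list_ctor };

	sanitize_free(&c, obj);
	printf("byte[20] after free: %#x\n", obj[20]);	/* 0xfe */
	return 0;
}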
100910diff --git a/mm/slab.h b/mm/slab.h
100911index 4c3ac12..7b2e470 100644
100912--- a/mm/slab.h
100913+++ b/mm/slab.h
100914@@ -22,7 +22,7 @@ struct kmem_cache {
100915 unsigned int align; /* Alignment as calculated */
100916 unsigned long flags; /* Active flags on the slab */
100917 const char *name; /* Slab name for sysfs */
100918- int refcount; /* Use counter */
100919+ atomic_t refcount; /* Use counter */
100920 void (*ctor)(void *); /* Called on object slot creation */
100921 struct list_head list; /* List of all slab caches on the system */
100922 };
100923@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
100924 /* The slab cache that manages slab cache information */
100925 extern struct kmem_cache *kmem_cache;
100926
100927+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100928+#ifdef CONFIG_X86_64
100929+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
100930+#else
100931+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
100932+#endif
100933+enum pax_sanitize_mode {
100934+ PAX_SANITIZE_SLAB_OFF = 0,
100935+ PAX_SANITIZE_SLAB_FAST,
100936+ PAX_SANITIZE_SLAB_FULL,
100937+};
100938+extern enum pax_sanitize_mode pax_sanitize_slab;
100939+#endif
100940+
100941 unsigned long calculate_alignment(unsigned long flags,
100942 unsigned long align, unsigned long size);
100943
100944@@ -114,7 +128,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
100945
100946 /* Legal flag mask for kmem_cache_create(), for various configurations */
100947 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
100948- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
100949+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
100950+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
100951
100952 #if defined(CONFIG_DEBUG_SLAB)
100953 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
100954@@ -315,6 +330,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
100955 return s;
100956
100957 page = virt_to_head_page(x);
100958+
100959+ BUG_ON(!PageSlab(page));
100960+
100961 cachep = page->slab_cache;
100962 if (slab_equal_or_root(cachep, s))
100963 return cachep;
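slab.h centralizes what the three allocators share: the sanitize poison byte (0xfe on x86_64, 0xff elsewhere), the off/fast/full mode enum, a widened SLAB_CORE_FLAGS so SLAB_USERCOPY and SLAB_NO_SANITIZE survive cache-creation validation, and a BUG_ON(!PageSlab(page)) in cache_from_obj() so a forged pointer cannot masquerade as a slab object. The amd64-specific 0xfe is presumably chosen because a stale pointer refilled with that byte becomes noncanonical and faults on dereference; a quick illustration (the rationale is an assumption, not stated in the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	void *stale;
	memset(&stale, 0xfe, sizeof(stale));	/* 0xfefefefefefefefe */
	printf("poisoned pointer value: %p\n", stale);
	/* *(volatile int *)stale;  -> noncanonical on x86_64, #GP/SIGSEGV */
	return 0;
}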
100964diff --git a/mm/slab_common.c b/mm/slab_common.c
100965index 999bb34..9843aea 100644
100966--- a/mm/slab_common.c
100967+++ b/mm/slab_common.c
100968@@ -25,11 +25,35 @@
100969
100970 #include "slab.h"
100971
100972-enum slab_state slab_state;
100973+enum slab_state slab_state __read_only;
100974 LIST_HEAD(slab_caches);
100975 DEFINE_MUTEX(slab_mutex);
100976 struct kmem_cache *kmem_cache;
100977
100978+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100979+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
100980+static int __init pax_sanitize_slab_setup(char *str)
100981+{
100982+ if (!str)
100983+ return 0;
100984+
100985+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
100986+ pr_info("PaX slab sanitization: %s\n", "disabled");
100987+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
100988+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
100989+ pr_info("PaX slab sanitization: %s\n", "fast");
100990+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
100991+ } else if (!strcmp(str, "full")) {
100992+ pr_info("PaX slab sanitization: %s\n", "full");
100993+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
100994+ } else
100995+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
100996+
100997+ return 0;
100998+}
100999+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
101000+#endif
101001+
101002 /*
101003 * Set of flags that will prevent slab merging
101004 */
101005@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
101006 * Merge control. If this is set then no merging of slab caches will occur.
101007 * (Could be removed. This was introduced to pacify the merge skeptics.)
101008 */
101009-static int slab_nomerge;
101010+static int slab_nomerge = 1;
101011
101012 static int __init setup_slab_nomerge(char *str)
101013 {
101014@@ -217,7 +241,7 @@ int slab_unmergeable(struct kmem_cache *s)
101015 /*
101016 * We may have set a slab to be unmergeable during bootstrap.
101017 */
101018- if (s->refcount < 0)
101019+ if (atomic_read(&s->refcount) < 0)
101020 return 1;
101021
101022 return 0;
101023@@ -321,7 +345,7 @@ do_kmem_cache_create(const char *name, size_t object_size, size_t size,
101024 if (err)
101025 goto out_free_cache;
101026
101027- s->refcount = 1;
101028+ atomic_set(&s->refcount, 1);
101029 list_add(&s->list, &slab_caches);
101030 out:
101031 if (err)
101032@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
101033 */
101034 flags &= CACHE_CREATE_MASK;
101035
101036+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101037+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
101038+ flags |= SLAB_NO_SANITIZE;
101039+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
101040+ flags &= ~SLAB_NO_SANITIZE;
101041+#endif
101042+
101043 s = __kmem_cache_alias(name, size, align, flags, ctor);
101044 if (s)
101045 goto out_unlock;
101046@@ -456,7 +487,7 @@ static void do_kmem_cache_release(struct list_head *release,
101047 rcu_barrier();
101048
101049 list_for_each_entry_safe(s, s2, release, list) {
101050-#ifdef SLAB_SUPPORTS_SYSFS
101051+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101052 sysfs_slab_remove(s);
101053 #else
101054 slab_kmem_cache_release(s);
101055@@ -625,8 +656,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
101056
101057 mutex_lock(&slab_mutex);
101058
101059- s->refcount--;
101060- if (s->refcount)
101061+ if (!atomic_dec_and_test(&s->refcount))
101062 goto out_unlock;
101063
101064 for_each_memcg_cache_safe(c, c2, s) {
101065@@ -691,7 +721,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
101066 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
101067 name, size, err);
101068
101069- s->refcount = -1; /* Exempt from merging for now */
101070+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
101071 }
101072
101073 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
101074@@ -704,7 +734,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
101075
101076 create_boot_cache(s, name, size, flags);
101077 list_add(&s->list, &slab_caches);
101078- s->refcount = 1;
101079+ atomic_set(&s->refcount, 1);
101080 return s;
101081 }
101082
101083@@ -716,6 +746,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
101084 EXPORT_SYMBOL(kmalloc_dma_caches);
101085 #endif
101086
101087+#ifdef CONFIG_PAX_USERCOPY_SLABS
101088+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
101089+EXPORT_SYMBOL(kmalloc_usercopy_caches);
101090+#endif
101091+
101092 /*
101093 * Conversion table for small slabs sizes / 8 to the index in the
101094 * kmalloc array. This is necessary for slabs < 192 since we have non power
101095@@ -780,6 +815,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
101096 return kmalloc_dma_caches[index];
101097
101098 #endif
101099+
101100+#ifdef CONFIG_PAX_USERCOPY_SLABS
101101+ if (unlikely((flags & GFP_USERCOPY)))
101102+ return kmalloc_usercopy_caches[index];
101103+
101104+#endif
101105+
101106 return kmalloc_caches[index];
101107 }
101108
101109@@ -836,7 +878,7 @@ void __init create_kmalloc_caches(unsigned long flags)
101110 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
101111 if (!kmalloc_caches[i]) {
101112 kmalloc_caches[i] = create_kmalloc_cache(NULL,
101113- 1 << i, flags);
101114+ 1 << i, SLAB_USERCOPY | flags);
101115 }
101116
101117 /*
101118@@ -845,10 +887,10 @@ void __init create_kmalloc_caches(unsigned long flags)
101119 * earlier power of two caches
101120 */
101121 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
101122- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
101123+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
101124
101125 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
101126- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
101127+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
101128 }
101129
101130 /* Kmalloc array is now usable */
101131@@ -881,6 +923,23 @@ void __init create_kmalloc_caches(unsigned long flags)
101132 }
101133 }
101134 #endif
101135+
101136+#ifdef CONFIG_PAX_USERCOPY_SLABS
101137+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
101138+ struct kmem_cache *s = kmalloc_caches[i];
101139+
101140+ if (s) {
101141+ int size = kmalloc_size(i);
101142+ char *n = kasprintf(GFP_NOWAIT,
101143+ "usercopy-kmalloc-%d", size);
101144+
101145+ BUG_ON(!n);
101146+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
101147+ size, SLAB_USERCOPY | flags);
101148+ }
101149+ }
101150+#endif
101151+
101152 }
101153 #endif /* !CONFIG_SLOB */
101154
101155@@ -940,6 +999,9 @@ static void print_slabinfo_header(struct seq_file *m)
101156 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
101157 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
101158 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
101159+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101160+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
101161+#endif
101162 #endif
101163 seq_putc(m, '\n');
101164 }
101165@@ -1069,7 +1131,7 @@ static int __init slab_proc_init(void)
101166 module_init(slab_proc_init);
101167 #endif /* CONFIG_SLABINFO */
101168
101169-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
101170+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
101171 gfp_t flags)
101172 {
101173 void *ret;
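Policy defaults move into slab_common.c: slab_nomerge flips to 1 so unrelated caches are no longer merged (merging puts objects of different types on shared pages, easing cross-cache grooming), sanitization defaults to fast with a pax_sanitize_slab= early parameter, and a parallel kmalloc_usercopy_caches[] array serves GFP_USERCOPY allocations so usercopy-eligible kmalloc memory is segregated. How a cache's effective sanitize flag is resolved in kmem_cache_create(), as a self-contained sketch (flag values are illustrative):

#include <stdio.h>

#define SLAB_DESTROY_BY_RCU	0x1UL
#define SLAB_NO_SANITIZE	0x2UL

enum pax_sanitize_mode { OFF, FAST, FULL };

static unsigned long resolve(enum pax_sanitize_mode mode, unsigned long flags)
{
	/* RCU caches are always exempt: objects may legitimately be
	 * read after free until the grace period ends. */
	if (mode == OFF || (flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_NO_SANITIZE;
	else if (mode == FULL)
		flags &= ~SLAB_NO_SANITIZE;	/* override per-cache opt-outs */
	return flags;
}

int main(void)
{
	printf("%lx\n", resolve(FAST, SLAB_DESTROY_BY_RCU));	/* 3: exempt */
	printf("%lx\n", resolve(FULL, SLAB_NO_SANITIZE));	/* 0: forced */
	printf("%lx\n", resolve(OFF, 0));			/* 2: exempt */
	return 0;
}

Booting with pax_sanitize_slab=full would then strip SLAB_NO_SANITIZE from every non-RCU cache at creation time.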
101174diff --git a/mm/slob.c b/mm/slob.c
101175index 94a7fed..cf3fb1a 100644
101176--- a/mm/slob.c
101177+++ b/mm/slob.c
101178@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
101179 /*
101180 * Return the size of a slob block.
101181 */
101182-static slobidx_t slob_units(slob_t *s)
101183+static slobidx_t slob_units(const slob_t *s)
101184 {
101185 if (s->units > 0)
101186 return s->units;
101187@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
101188 /*
101189 * Return the next free slob block pointer after this one.
101190 */
101191-static slob_t *slob_next(slob_t *s)
101192+static slob_t *slob_next(const slob_t *s)
101193 {
101194 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
101195 slobidx_t next;
101196@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
101197 /*
101198 * Returns true if s is the last free block in its page.
101199 */
101200-static int slob_last(slob_t *s)
101201+static int slob_last(const slob_t *s)
101202 {
101203 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
101204 }
101205
101206-static void *slob_new_pages(gfp_t gfp, int order, int node)
101207+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
101208 {
101209- void *page;
101210+ struct page *page;
101211
101212 #ifdef CONFIG_NUMA
101213 if (node != NUMA_NO_NODE)
101214@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
101215 if (!page)
101216 return NULL;
101217
101218- return page_address(page);
101219+ __SetPageSlab(page);
101220+ return page;
101221 }
101222
101223-static void slob_free_pages(void *b, int order)
101224+static void slob_free_pages(struct page *sp, int order)
101225 {
101226 if (current->reclaim_state)
101227 current->reclaim_state->reclaimed_slab += 1 << order;
101228- free_pages((unsigned long)b, order);
101229+ __ClearPageSlab(sp);
101230+ page_mapcount_reset(sp);
101231+ sp->private = 0;
101232+ __free_pages(sp, order);
101233 }
101234
101235 /*
101236@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
101237
101238 /* Not enough space: must allocate a new page */
101239 if (!b) {
101240- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
101241- if (!b)
101242+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
101243+ if (!sp)
101244 return NULL;
101245- sp = virt_to_page(b);
101246- __SetPageSlab(sp);
101247+ b = page_address(sp);
101248
101249 spin_lock_irqsave(&slob_lock, flags);
101250 sp->units = SLOB_UNITS(PAGE_SIZE);
101251 sp->freelist = b;
101252+ sp->private = 0;
101253 INIT_LIST_HEAD(&sp->lru);
101254 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
101255 set_slob_page_free(sp, slob_list);
101256@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
101257 /*
101258 * slob_free: entry point into the slob allocator.
101259 */
101260-static void slob_free(void *block, int size)
101261+static void slob_free(struct kmem_cache *c, void *block, int size)
101262 {
101263 struct page *sp;
101264 slob_t *prev, *next, *b = (slob_t *)block;
101265@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
101266 if (slob_page_free(sp))
101267 clear_slob_page_free(sp);
101268 spin_unlock_irqrestore(&slob_lock, flags);
101269- __ClearPageSlab(sp);
101270- page_mapcount_reset(sp);
101271- slob_free_pages(b, 0);
101272+ slob_free_pages(sp, 0);
101273 return;
101274 }
101275
101276+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101277+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
101278+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
101279+#endif
101280+
101281 if (!slob_page_free(sp)) {
101282 /* This slob page is about to become partially free. Easy! */
101283 sp->units = units;
101284@@ -424,11 +431,10 @@ out:
101285 */
101286
101287 static __always_inline void *
101288-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
101289+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
101290 {
101291- unsigned int *m;
101292- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
101293- void *ret;
101294+ slob_t *m;
101295+ void *ret = NULL;
101296
101297 gfp &= gfp_allowed_mask;
101298
101299@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
101300
101301 if (!m)
101302 return NULL;
101303- *m = size;
101304+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
101305+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
101306+ m[0].units = size;
101307+ m[1].units = align;
101308 ret = (void *)m + align;
101309
101310 trace_kmalloc_node(caller, ret,
101311 size, size + align, gfp, node);
101312 } else {
101313 unsigned int order = get_order(size);
101314+ struct page *page;
101315
101316 if (likely(order))
101317 gfp |= __GFP_COMP;
101318- ret = slob_new_pages(gfp, order, node);
101319+ page = slob_new_pages(gfp, order, node);
101320+ if (page) {
101321+ ret = page_address(page);
101322+ page->private = size;
101323+ }
101324
101325 trace_kmalloc_node(caller, ret,
101326 size, PAGE_SIZE << order, gfp, node);
101327 }
101328
101329- kmemleak_alloc(ret, size, 1, gfp);
101330 return ret;
101331 }
101332
101333-void *__kmalloc(size_t size, gfp_t gfp)
101334+static __always_inline void *
101335+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
101336+{
101337+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
101338+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
101339+
101340+ if (!ZERO_OR_NULL_PTR(ret))
101341+ kmemleak_alloc(ret, size, 1, gfp);
101342+ return ret;
101343+}
101344+
101345+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
101346 {
101347 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
101348 }
101349@@ -491,34 +515,112 @@ void kfree(const void *block)
101350 return;
101351 kmemleak_free(block);
101352
101353+ VM_BUG_ON(!virt_addr_valid(block));
101354 sp = virt_to_page(block);
101355- if (PageSlab(sp)) {
101356+ VM_BUG_ON(!PageSlab(sp));
101357+ if (!sp->private) {
101358 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
101359- unsigned int *m = (unsigned int *)(block - align);
101360- slob_free(m, *m + align);
101361- } else
101362+ slob_t *m = (slob_t *)(block - align);
101363+ slob_free(NULL, m, m[0].units + align);
101364+ } else {
101365+ __ClearPageSlab(sp);
101366+ page_mapcount_reset(sp);
101367+ sp->private = 0;
101368 __free_pages(sp, compound_order(sp));
101369+ }
101370 }
101371 EXPORT_SYMBOL(kfree);
101372
101373+bool is_usercopy_object(const void *ptr)
101374+{
101375+ if (!slab_is_available())
101376+ return false;
101377+
101378+ // PAX: TODO
101379+
101380+ return false;
101381+}
101382+
101383+#ifdef CONFIG_PAX_USERCOPY
101384+const char *check_heap_object(const void *ptr, unsigned long n)
101385+{
101386+ struct page *page;
101387+ const slob_t *free;
101388+ const void *base;
101389+ unsigned long flags;
101390+
101391+ if (ZERO_OR_NULL_PTR(ptr))
101392+ return "<null>";
101393+
101394+ if (!virt_addr_valid(ptr))
101395+ return NULL;
101396+
101397+ page = virt_to_head_page(ptr);
101398+ if (!PageSlab(page))
101399+ return NULL;
101400+
101401+ if (page->private) {
101402+ base = page;
101403+ if (base <= ptr && n <= page->private - (ptr - base))
101404+ return NULL;
101405+ return "<slob>";
101406+ }
101407+
101408+ /* some tricky double walking to find the chunk */
101409+ spin_lock_irqsave(&slob_lock, flags);
101410+ base = (void *)((unsigned long)ptr & PAGE_MASK);
101411+ free = page->freelist;
101412+
101413+ while (!slob_last(free) && (void *)free <= ptr) {
101414+ base = free + slob_units(free);
101415+ free = slob_next(free);
101416+ }
101417+
101418+ while (base < (void *)free) {
101419+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
101420+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
101421+ int offset;
101422+
101423+ if (ptr < base + align)
101424+ break;
101425+
101426+ offset = ptr - base - align;
101427+ if (offset >= m) {
101428+ base += size;
101429+ continue;
101430+ }
101431+
101432+ if (n > m - offset)
101433+ break;
101434+
101435+ spin_unlock_irqrestore(&slob_lock, flags);
101436+ return NULL;
101437+ }
101438+
101439+ spin_unlock_irqrestore(&slob_lock, flags);
101440+ return "<slob>";
101441+}
101442+#endif
101443+
101444 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
101445 size_t ksize(const void *block)
101446 {
101447 struct page *sp;
101448 int align;
101449- unsigned int *m;
101450+ slob_t *m;
101451
101452 BUG_ON(!block);
101453 if (unlikely(block == ZERO_SIZE_PTR))
101454 return 0;
101455
101456 sp = virt_to_page(block);
101457- if (unlikely(!PageSlab(sp)))
101458- return PAGE_SIZE << compound_order(sp);
101459+ VM_BUG_ON(!PageSlab(sp));
101460+ if (sp->private)
101461+ return sp->private;
101462
101463 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
101464- m = (unsigned int *)(block - align);
101465- return SLOB_UNITS(*m) * SLOB_UNIT;
101466+ m = (slob_t *)(block - align);
101467+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
101468 }
101469 EXPORT_SYMBOL(ksize);
101470
101471@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
101472
101473 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
101474 {
101475- void *b;
101476+ void *b = NULL;
101477
101478 flags &= gfp_allowed_mask;
101479
101480 lockdep_trace_alloc(flags);
101481
101482+#ifdef CONFIG_PAX_USERCOPY_SLABS
101483+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
101484+#else
101485 if (c->size < PAGE_SIZE) {
101486 b = slob_alloc(c->size, flags, c->align, node);
101487 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
101488 SLOB_UNITS(c->size) * SLOB_UNIT,
101489 flags, node);
101490 } else {
101491- b = slob_new_pages(flags, get_order(c->size), node);
101492+ struct page *sp;
101493+
101494+ sp = slob_new_pages(flags, get_order(c->size), node);
101495+ if (sp) {
101496+ b = page_address(sp);
101497+ sp->private = c->size;
101498+ }
101499 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
101500 PAGE_SIZE << get_order(c->size),
101501 flags, node);
101502 }
101503+#endif
101504
101505 if (b && c->ctor)
101506 c->ctor(b);
101507@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
101508 EXPORT_SYMBOL(kmem_cache_alloc);
101509
101510 #ifdef CONFIG_NUMA
101511-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
101512+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
101513 {
101514 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
101515 }
101516@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
101517 EXPORT_SYMBOL(kmem_cache_alloc_node);
101518 #endif
101519
101520-static void __kmem_cache_free(void *b, int size)
101521+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
101522 {
101523- if (size < PAGE_SIZE)
101524- slob_free(b, size);
101525+ struct page *sp;
101526+
101527+ sp = virt_to_page(b);
101528+ BUG_ON(!PageSlab(sp));
101529+ if (!sp->private)
101530+ slob_free(c, b, size);
101531 else
101532- slob_free_pages(b, get_order(size));
101533+ slob_free_pages(sp, get_order(size));
101534 }
101535
101536 static void kmem_rcu_free(struct rcu_head *head)
101537@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
101538 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
101539 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
101540
101541- __kmem_cache_free(b, slob_rcu->size);
101542+ __kmem_cache_free(NULL, b, slob_rcu->size);
101543 }
101544
101545 void kmem_cache_free(struct kmem_cache *c, void *b)
101546 {
101547+ int size = c->size;
101548+
101549+#ifdef CONFIG_PAX_USERCOPY_SLABS
101550+ if (size + c->align < PAGE_SIZE) {
101551+ size += c->align;
101552+ b -= c->align;
101553+ }
101554+#endif
101555+
101556 kmemleak_free_recursive(b, c->flags);
101557 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
101558 struct slob_rcu *slob_rcu;
101559- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
101560- slob_rcu->size = c->size;
101561+ slob_rcu = b + (size - sizeof(struct slob_rcu));
101562+ slob_rcu->size = size;
101563 call_rcu(&slob_rcu->head, kmem_rcu_free);
101564 } else {
101565- __kmem_cache_free(b, c->size);
101566+ __kmem_cache_free(c, b, size);
101567 }
101568
101569+#ifdef CONFIG_PAX_USERCOPY_SLABS
101570+ trace_kfree(_RET_IP_, b);
101571+#else
101572 trace_kmem_cache_free(_RET_IP_, b);
101573+#endif
101574+
101575 }
101576 EXPORT_SYMBOL(kmem_cache_free);
101577
101578diff --git a/mm/slub.c b/mm/slub.c
101579index 82c4737..55c316a 100644
101580--- a/mm/slub.c
101581+++ b/mm/slub.c
101582@@ -198,7 +198,7 @@ struct track {
101583
101584 enum track_item { TRACK_ALLOC, TRACK_FREE };
101585
101586-#ifdef CONFIG_SYSFS
101587+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101588 static int sysfs_slab_add(struct kmem_cache *);
101589 static int sysfs_slab_alias(struct kmem_cache *, const char *);
101590 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
101591@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
101592 if (!t->addr)
101593 return;
101594
101595- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
101596+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
101597 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
101598 #ifdef CONFIG_STACKTRACE
101599 {
101600@@ -2709,6 +2709,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
101601
101602 slab_free_hook(s, x);
101603
101604+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101605+ if (!(s->flags & SLAB_NO_SANITIZE)) {
101606+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
101607+ if (s->ctor)
101608+ s->ctor(x);
101609+ }
101610+#endif
101611+
101612 redo:
101613 /*
101614 * Determine the currently cpus per cpu slab.
101615@@ -3050,6 +3058,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
101616 s->inuse = size;
101617
101618 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
101619+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101620+ (!(flags & SLAB_NO_SANITIZE)) ||
101621+#endif
101622 s->ctor)) {
101623 /*
101624 * Relocate free pointer after the object if it is not
101625@@ -3304,7 +3315,7 @@ static int __init setup_slub_min_objects(char *str)
101626
101627 __setup("slub_min_objects=", setup_slub_min_objects);
101628
101629-void *__kmalloc(size_t size, gfp_t flags)
101630+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
101631 {
101632 struct kmem_cache *s;
101633 void *ret;
101634@@ -3342,7 +3353,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
101635 return ptr;
101636 }
101637
101638-void *__kmalloc_node(size_t size, gfp_t flags, int node)
101639+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
101640 {
101641 struct kmem_cache *s;
101642 void *ret;
101643@@ -3390,6 +3401,59 @@ static size_t __ksize(const void *object)
101644 return slab_ksize(page->slab_cache);
101645 }
101646
101647+bool is_usercopy_object(const void *ptr)
101648+{
101649+ struct page *page;
101650+ struct kmem_cache *s;
101651+
101652+ if (ZERO_OR_NULL_PTR(ptr))
101653+ return false;
101654+
101655+ if (!slab_is_available())
101656+ return false;
101657+
101658+ if (!virt_addr_valid(ptr))
101659+ return false;
101660+
101661+ page = virt_to_head_page(ptr);
101662+
101663+ if (!PageSlab(page))
101664+ return false;
101665+
101666+ s = page->slab_cache;
101667+ return s->flags & SLAB_USERCOPY;
101668+}
101669+
101670+#ifdef CONFIG_PAX_USERCOPY
101671+const char *check_heap_object(const void *ptr, unsigned long n)
101672+{
101673+ struct page *page;
101674+ struct kmem_cache *s;
101675+ unsigned long offset;
101676+
101677+ if (ZERO_OR_NULL_PTR(ptr))
101678+ return "<null>";
101679+
101680+ if (!virt_addr_valid(ptr))
101681+ return NULL;
101682+
101683+ page = virt_to_head_page(ptr);
101684+
101685+ if (!PageSlab(page))
101686+ return NULL;
101687+
101688+ s = page->slab_cache;
101689+ if (!(s->flags & SLAB_USERCOPY))
101690+ return s->name;
101691+
101692+ offset = (ptr - page_address(page)) % s->size;
101693+ if (offset <= s->object_size && n <= s->object_size - offset)
101694+ return NULL;
101695+
101696+ return s->name;
101697+}
101698+#endif
101699+
101700 size_t ksize(const void *object)
101701 {
101702 size_t size = __ksize(object);
101703@@ -3410,6 +3474,7 @@ void kfree(const void *x)
101704 if (unlikely(ZERO_OR_NULL_PTR(x)))
101705 return;
101706
101707+ VM_BUG_ON(!virt_addr_valid(x));
101708 page = virt_to_head_page(x);
101709 if (unlikely(!PageSlab(page))) {
101710 BUG_ON(!PageCompound(page));
101711@@ -3726,7 +3791,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
101712
101713 s = find_mergeable(size, align, flags, name, ctor);
101714 if (s) {
101715- s->refcount++;
101716+ atomic_inc(&s->refcount);
101717
101718 /*
101719 * Adjust the object sizes so that we clear
101720@@ -3742,7 +3807,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
101721 }
101722
101723 if (sysfs_slab_alias(s, name)) {
101724- s->refcount--;
101725+ atomic_dec(&s->refcount);
101726 s = NULL;
101727 }
101728 }
101729@@ -3859,7 +3924,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
101730 }
101731 #endif
101732
101733-#ifdef CONFIG_SYSFS
101734+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101735 static int count_inuse(struct page *page)
101736 {
101737 return page->inuse;
101738@@ -4140,7 +4205,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
101739 len += sprintf(buf + len, "%7ld ", l->count);
101740
101741 if (l->addr)
101742+#ifdef CONFIG_GRKERNSEC_HIDESYM
101743+ len += sprintf(buf + len, "%pS", NULL);
101744+#else
101745 len += sprintf(buf + len, "%pS", (void *)l->addr);
101746+#endif
101747 else
101748 len += sprintf(buf + len, "<not-available>");
101749
101750@@ -4238,12 +4307,12 @@ static void __init resiliency_test(void)
101751 validate_slab_cache(kmalloc_caches[9]);
101752 }
101753 #else
101754-#ifdef CONFIG_SYSFS
101755+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101756 static void resiliency_test(void) {};
101757 #endif
101758 #endif
101759
101760-#ifdef CONFIG_SYSFS
101761+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101762 enum slab_stat_type {
101763 SL_ALL, /* All slabs */
101764 SL_PARTIAL, /* Only partially allocated slabs */
101765@@ -4480,13 +4549,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
101766 {
101767 if (!s->ctor)
101768 return 0;
101769+#ifdef CONFIG_GRKERNSEC_HIDESYM
101770+ return sprintf(buf, "%pS\n", NULL);
101771+#else
101772 return sprintf(buf, "%pS\n", s->ctor);
101773+#endif
101774 }
101775 SLAB_ATTR_RO(ctor);
101776
101777 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
101778 {
101779- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
101780+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
101781 }
101782 SLAB_ATTR_RO(aliases);
101783
101784@@ -4574,6 +4647,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
101785 SLAB_ATTR_RO(cache_dma);
101786 #endif
101787
101788+#ifdef CONFIG_PAX_USERCOPY_SLABS
101789+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
101790+{
101791+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
101792+}
101793+SLAB_ATTR_RO(usercopy);
101794+#endif
101795+
101796+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101797+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
101798+{
101799+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
101800+}
101801+SLAB_ATTR_RO(sanitize);
101802+#endif
101803+
101804 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
101805 {
101806 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
101807@@ -4629,7 +4718,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
101808 * as well as cause other issues like converting a mergeable
101809 * cache into an umergeable one.
101810 */
101811- if (s->refcount > 1)
101812+ if (atomic_read(&s->refcount) > 1)
101813 return -EINVAL;
101814
101815 s->flags &= ~SLAB_TRACE;
101816@@ -4749,7 +4838,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
101817 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
101818 size_t length)
101819 {
101820- if (s->refcount > 1)
101821+ if (atomic_read(&s->refcount) > 1)
101822 return -EINVAL;
101823
101824 s->flags &= ~SLAB_FAILSLAB;
101825@@ -4916,6 +5005,12 @@ static struct attribute *slab_attrs[] = {
101826 #ifdef CONFIG_ZONE_DMA
101827 &cache_dma_attr.attr,
101828 #endif
101829+#ifdef CONFIG_PAX_USERCOPY_SLABS
101830+ &usercopy_attr.attr,
101831+#endif
101832+#ifdef CONFIG_PAX_MEMORY_SANITIZE
101833+ &sanitize_attr.attr,
101834+#endif
101835 #ifdef CONFIG_NUMA
101836 &remote_node_defrag_ratio_attr.attr,
101837 #endif
101838@@ -5157,6 +5252,7 @@ static char *create_unique_id(struct kmem_cache *s)
101839 return name;
101840 }
101841
101842+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101843 static int sysfs_slab_add(struct kmem_cache *s)
101844 {
101845 int err;
101846@@ -5230,6 +5326,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
101847 kobject_del(&s->kobj);
101848 kobject_put(&s->kobj);
101849 }
101850+#endif
101851
101852 /*
101853 * Need to buffer aliases during bootup until sysfs becomes
101854@@ -5243,6 +5340,7 @@ struct saved_alias {
101855
101856 static struct saved_alias *alias_list;
101857
101858+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
101859 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
101860 {
101861 struct saved_alias *al;
101862@@ -5265,6 +5363,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
101863 alias_list = al;
101864 return 0;
101865 }
101866+#endif
101867
101868 static int __init slab_sysfs_init(void)
101869 {
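The SLUB side mirrors slab.c: sanitize-on-free runs before the freelist is touched, sysfs slab files are suppressed under GRKERNSEC_PROC_ADD, symbol addresses in tracking output are hidden under HIDESYM (%pA, or %pS fed NULL), the refcount becomes atomic, and check_heap_object() locates a pointer's offset within its object by taking it modulo the object stride. That bounds check, self-contained (invented numbers):

#include <stdio.h>

struct fake_slab {
	unsigned long page_base;	/* page_address(page) */
	unsigned long size;		/* object stride incl. metadata */
	unsigned long object_size;	/* usable bytes */
};

static int copy_allowed(const struct fake_slab *s, unsigned long ptr,
			unsigned long n)
{
	unsigned long offset = (ptr - s->page_base) % s->size;

	/* copy must start inside the object and fit in what remains */
	return offset <= s->object_size && n <= s->object_size - offset;
}

int main(void)
{
	struct fake_slab s = { 0x1000, 128, 104 };

	printf("%d\n", copy_allowed(&s, 0x1000 + 128 + 8, 64));	/* 1 */
	printf("%d\n", copy_allowed(&s, 0x1000 + 128 + 8, 128));	/* 0: overflow */
	return 0;
}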
101870diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
101871index 4cba9c2..b4f9fcc 100644
101872--- a/mm/sparse-vmemmap.c
101873+++ b/mm/sparse-vmemmap.c
101874@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
101875 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
101876 if (!p)
101877 return NULL;
101878- pud_populate(&init_mm, pud, p);
101879+ pud_populate_kernel(&init_mm, pud, p);
101880 }
101881 return pud;
101882 }
101883@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
101884 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
101885 if (!p)
101886 return NULL;
101887- pgd_populate(&init_mm, pgd, p);
101888+ pgd_populate_kernel(&init_mm, pgd, p);
101889 }
101890 return pgd;
101891 }
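The *_populate_kernel() variants exist because, under KERNEXEC, kernel page tables live in read-only memory; the helpers open a write window only around the store itself. Conceptual pseudo-kernel C, not compilable on its own; the pax_open_kernel()/pax_close_kernel() bracket is the same one visible in the vmalloc.c hunks further down:

static inline void pud_populate_kernel_sketch(pud_t *pud, pmd_t *pmd)
{
	pax_open_kernel();		/* e.g. clear CR0.WP on x86 */
	pud_populate(&init_mm, pud, pmd);	/* the one protected store */
	pax_close_kernel();		/* restore write protection */
}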
101892diff --git a/mm/sparse.c b/mm/sparse.c
101893index d1b48b6..6e8590e 100644
101894--- a/mm/sparse.c
101895+++ b/mm/sparse.c
101896@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
101897
101898 for (i = 0; i < PAGES_PER_SECTION; i++) {
101899 if (PageHWPoison(&memmap[i])) {
101900- atomic_long_sub(1, &num_poisoned_pages);
101901+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
101902 ClearPageHWPoison(&memmap[i]);
101903 }
101904 }
101905diff --git a/mm/swap.c b/mm/swap.c
101906index cd3a5e6..40c0c8f 100644
101907--- a/mm/swap.c
101908+++ b/mm/swap.c
101909@@ -31,6 +31,7 @@
101910 #include <linux/memcontrol.h>
101911 #include <linux/gfp.h>
101912 #include <linux/uio.h>
101913+#include <linux/hugetlb.h>
101914
101915 #include "internal.h"
101916
101917@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
101918
101919 __page_cache_release(page);
101920 dtor = get_compound_page_dtor(page);
101921+ if (!PageHuge(page))
101922+ BUG_ON(dtor != free_compound_page);
101923 (*dtor)(page);
101924 }
101925
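The swap.c hunk pins the compound-page destructor: unless the page is a hugetlb page, the function pointer fetched from struct page must be free_compound_page, turning metadata corruption into an immediate BUG instead of an arbitrary indirect call. The shape of that defense in user-space form (illustrative struct, assert() standing in for BUG_ON):

#include <assert.h>
#include <stdio.h>

typedef void (*dtor_t)(void *);

static void free_compound_page(void *p) { printf("freed %p\n", p); }

struct fake_page {
	int is_huge;
	dtor_t dtor;	/* attacker-reachable if page metadata is corrupted */
};

static void put_compound(struct fake_page *page)
{
	dtor_t dtor = page->dtor;

	if (!page->is_huge)
		assert(dtor == free_compound_page);	/* BUG_ON analogue */
	dtor(page);
}

int main(void)
{
	struct fake_page p = { 0, free_compound_page };
	put_compound(&p);
	return 0;
}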
101926diff --git a/mm/swapfile.c b/mm/swapfile.c
101927index 63f55cc..31874e6 100644
101928--- a/mm/swapfile.c
101929+++ b/mm/swapfile.c
101930@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
101931
101932 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
101933 /* Activity counter to indicate that a swapon or swapoff has occurred */
101934-static atomic_t proc_poll_event = ATOMIC_INIT(0);
101935+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
101936
101937 static inline unsigned char swap_count(unsigned char ent)
101938 {
101939@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
101940 spin_unlock(&swap_lock);
101941
101942 err = 0;
101943- atomic_inc(&proc_poll_event);
101944+ atomic_inc_unchecked(&proc_poll_event);
101945 wake_up_interruptible(&proc_poll_wait);
101946
101947 out_dput:
101948@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
101949
101950 poll_wait(file, &proc_poll_wait, wait);
101951
101952- if (seq->poll_event != atomic_read(&proc_poll_event)) {
101953- seq->poll_event = atomic_read(&proc_poll_event);
101954+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
101955+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
101956 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
101957 }
101958
101959@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
101960 return ret;
101961
101962 seq = file->private_data;
101963- seq->poll_event = atomic_read(&proc_poll_event);
101964+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
101965 return 0;
101966 }
101967
101968@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
101969 (frontswap_map) ? "FS" : "");
101970
101971 mutex_unlock(&swapon_mutex);
101972- atomic_inc(&proc_poll_event);
101973+ atomic_inc_unchecked(&proc_poll_event);
101974 wake_up_interruptible(&proc_poll_wait);
101975
101976 if (S_ISREG(inode->i_mode))
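proc_poll_event only signals that a swapon/swapoff happened, so wraparound is harmless and it becomes atomic_unchecked_t: under PAX_REFCOUNT the plain atomic_t operations trap on overflow (an overflowed refcount is a use-after-free primitive), and the _unchecked variants exist precisely to exempt statistics like this from that trap. A user-space sketch of the split, with GCC's overflow builtins standing in for the instrumented atomics:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_inc(int v)			/* atomic_inc analogue */
{
	int r;
	if (__builtin_add_overflow(v, 1, &r)) {
		fprintf(stderr, "refcount overflow: mitigating\n");
		abort();
	}
	return r;
}

static int unchecked_inc(int v)			/* atomic_inc_unchecked */
{
	return (int)((unsigned int)v + 1);	/* wrap is fine for stats */
}

int main(void)
{
	printf("%d\n", unchecked_inc(INT_MAX));	/* wraps, by design */
	checked_inc(INT_MAX);			/* aborts */
	return 0;
}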
101977diff --git a/mm/util.c b/mm/util.c
101978index 3981ae9..28b585b 100644
101979--- a/mm/util.c
101980+++ b/mm/util.c
101981@@ -233,6 +233,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
101982 void arch_pick_mmap_layout(struct mm_struct *mm)
101983 {
101984 mm->mmap_base = TASK_UNMAPPED_BASE;
101985+
101986+#ifdef CONFIG_PAX_RANDMMAP
101987+ if (mm->pax_flags & MF_PAX_RANDMMAP)
101988+ mm->mmap_base += mm->delta_mmap;
101989+#endif
101990+
101991 mm->get_unmapped_area = arch_get_unmapped_area;
101992 }
101993 #endif
101994@@ -403,6 +409,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
101995 if (!mm->arg_end)
101996 goto out_mm; /* Shh! No looking before we're done */
101997
101998+ if (gr_acl_handle_procpidmem(task))
101999+ goto out_mm;
102000+
102001 len = mm->arg_end - mm->arg_start;
102002
102003 if (len > buflen)
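Two unrelated gates in util.c: arch_pick_mmap_layout() adds the per-exec delta_mmap under PAX_RANDMMAP, so even the legacy bottom-up layout gets a randomized base, and get_cmdline() consults the grsecurity RBAC hook before exposing another task's command line. The base-shift arithmetic, sketched with invented constants (the real delta comes from the kernel entropy pool with per-arch bit widths):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TASK_UNMAPPED_BASE	0x0000155500000000UL	/* example value */
#define PAGE_SHIFT		12

int main(void)
{
	/* e.g. 28 random bits' worth of pages, as a stand-in */
	uint64_t delta_mmap = ((uint64_t)(rand() & 0x0fffffff)) << PAGE_SHIFT;
	uint64_t mmap_base = TASK_UNMAPPED_BASE + delta_mmap;

	printf("mmap base: %#lx\n", (unsigned long)mmap_base);
	return 0;
}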
102004diff --git a/mm/vmalloc.c b/mm/vmalloc.c
102005index 49abccf..7bd1931 100644
102006--- a/mm/vmalloc.c
102007+++ b/mm/vmalloc.c
102008@@ -39,20 +39,65 @@ struct vfree_deferred {
102009 struct work_struct wq;
102010 };
102011 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
102012+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
102013+
102014+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102015+struct stack_deferred_llist {
102016+ struct llist_head list;
102017+ void *stack;
102018+ void *lowmem_stack;
102019+};
102020+
102021+struct stack_deferred {
102022+ struct stack_deferred_llist list;
102023+ struct work_struct wq;
102024+};
102025+
102026+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
102027+#endif
102028
102029 static void __vunmap(const void *, int);
102030
102031-static void free_work(struct work_struct *w)
102032+static void vfree_work(struct work_struct *w)
102033 {
102034 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
102035 struct llist_node *llnode = llist_del_all(&p->list);
102036 while (llnode) {
102037- void *p = llnode;
102038+ void *x = llnode;
102039 llnode = llist_next(llnode);
102040- __vunmap(p, 1);
102041+ __vunmap(x, 1);
102042 }
102043 }
102044
102045+static void vunmap_work(struct work_struct *w)
102046+{
102047+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
102048+ struct llist_node *llnode = llist_del_all(&p->list);
102049+ while (llnode) {
102050+ void *x = llnode;
102051+ llnode = llist_next(llnode);
102052+ __vunmap(x, 0);
102053+ }
102054+}
102055+
102056+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102057+static void unmap_work(struct work_struct *w)
102058+{
102059+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
102060+ struct llist_node *llnode = llist_del_all(&p->list.list);
102061+ while (llnode) {
102062+ struct stack_deferred_llist *x =
102063+ llist_entry((struct llist_head *)llnode,
102064+ struct stack_deferred_llist, list);
102065+ void *stack = ACCESS_ONCE(x->stack);
102066+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
102067+ llnode = llist_next(llnode);
102068+ __vunmap(stack, 0);
102069+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
102070+ }
102071+}
102072+#endif
102073+
102074 /*** Page table manipulation functions ***/
102075
102076 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
102077@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
102078
102079 pte = pte_offset_kernel(pmd, addr);
102080 do {
102081- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
102082- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
102083+
102084+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
102085+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
102086+ BUG_ON(!pte_exec(*pte));
102087+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
102088+ continue;
102089+ }
102090+#endif
102091+
102092+ {
102093+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
102094+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
102095+ }
102096 } while (pte++, addr += PAGE_SIZE, addr != end);
102097 }
102098
102099@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
102100 pte = pte_alloc_kernel(pmd, addr);
102101 if (!pte)
102102 return -ENOMEM;
102103+
102104+ pax_open_kernel();
102105 do {
102106 struct page *page = pages[*nr];
102107
102108- if (WARN_ON(!pte_none(*pte)))
102109+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
102110+ if (pgprot_val(prot) & _PAGE_NX)
102111+#endif
102112+
102113+ if (!pte_none(*pte)) {
102114+ pax_close_kernel();
102115+ WARN_ON(1);
102116 return -EBUSY;
102117- if (WARN_ON(!page))
102118+ }
102119+ if (!page) {
102120+ pax_close_kernel();
102121+ WARN_ON(1);
102122 return -ENOMEM;
102123+ }
102124 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
102125 (*nr)++;
102126 } while (pte++, addr += PAGE_SIZE, addr != end);
102127+ pax_close_kernel();
102128 return 0;
102129 }
102130
102131@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
102132 pmd_t *pmd;
102133 unsigned long next;
102134
102135- pmd = pmd_alloc(&init_mm, pud, addr);
102136+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
102137 if (!pmd)
102138 return -ENOMEM;
102139 do {
102140@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
102141 pud_t *pud;
102142 unsigned long next;
102143
102144- pud = pud_alloc(&init_mm, pgd, addr);
102145+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
102146 if (!pud)
102147 return -ENOMEM;
102148 do {
102149@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
102150 if (addr >= MODULES_VADDR && addr < MODULES_END)
102151 return 1;
102152 #endif
102153+
102154+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
102155+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
102156+ return 1;
102157+#endif
102158+
102159 return is_vmalloc_addr(x);
102160 }
102161
102162@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
102163
102164 if (!pgd_none(*pgd)) {
102165 pud_t *pud = pud_offset(pgd, addr);
102166+#ifdef CONFIG_X86
102167+ if (!pud_large(*pud))
102168+#endif
102169 if (!pud_none(*pud)) {
102170 pmd_t *pmd = pmd_offset(pud, addr);
102171+#ifdef CONFIG_X86
102172+ if (!pmd_large(*pmd))
102173+#endif
102174 if (!pmd_none(*pmd)) {
102175 pte_t *ptep, pte;
102176
102177@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
102178 * Allocate a region of KVA of the specified size and alignment, within the
102179 * vstart and vend.
102180 */
102181-static struct vmap_area *alloc_vmap_area(unsigned long size,
102182+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
102183 unsigned long align,
102184 unsigned long vstart, unsigned long vend,
102185 int node, gfp_t gfp_mask)
102186@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
102187 for_each_possible_cpu(i) {
102188 struct vmap_block_queue *vbq;
102189 struct vfree_deferred *p;
102190+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102191+ struct stack_deferred *p2;
102192+#endif
102193
102194 vbq = &per_cpu(vmap_block_queue, i);
102195 spin_lock_init(&vbq->lock);
102196 INIT_LIST_HEAD(&vbq->free);
102197+
102198 p = &per_cpu(vfree_deferred, i);
102199 init_llist_head(&p->list);
102200- INIT_WORK(&p->wq, free_work);
102201+ INIT_WORK(&p->wq, vfree_work);
102202+
102203+ p = &per_cpu(vunmap_deferred, i);
102204+ init_llist_head(&p->list);
102205+ INIT_WORK(&p->wq, vunmap_work);
102206+
102207+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102208+ p2 = &per_cpu(stack_deferred, i);
102209+ init_llist_head(&p2->list.list);
102210+ INIT_WORK(&p2->wq, unmap_work);
102211+#endif
102212 }
102213
102214 /* Import existing vmlist entries. */
102215@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
102216 struct vm_struct *area;
102217
102218 BUG_ON(in_interrupt());
102219+
102220+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
102221+ if (flags & VM_KERNEXEC) {
102222+ if (start != VMALLOC_START || end != VMALLOC_END)
102223+ return NULL;
102224+ start = (unsigned long)MODULES_EXEC_VADDR;
102225+ end = (unsigned long)MODULES_EXEC_END;
102226+ }
102227+#endif
102228+
102229 if (flags & VM_IOREMAP)
102230 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
102231
102232@@ -1510,13 +1615,36 @@ EXPORT_SYMBOL(vfree);
102233 */
102234 void vunmap(const void *addr)
102235 {
102236- BUG_ON(in_interrupt());
102237- might_sleep();
102238- if (addr)
102239+ if (!addr)
102240+ return;
102241+ if (unlikely(in_interrupt())) {
102242+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
102243+ if (llist_add((struct llist_node *)addr, &p->list))
102244+ schedule_work(&p->wq);
102245+ } else {
102246+ might_sleep();
102247 __vunmap(addr, 0);
102248+ }
102249 }
102250 EXPORT_SYMBOL(vunmap);
102251
102252+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
102253+void unmap_process_stacks(struct task_struct *task)
102254+{
102255+ if (unlikely(in_interrupt())) {
102256+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
102257+ struct stack_deferred_llist *list = task->stack;
102258+ list->stack = task->stack;
102259+ list->lowmem_stack = task->lowmem_stack;
102260+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
102261+ schedule_work(&p->wq);
102262+ } else {
102263+ __vunmap(task->stack, 0);
102264+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
102265+ }
102266+}
102267+#endif
102268+
102269 /**
102270 * vmap - map an array of pages into virtually contiguous space
102271 * @pages: array of page pointers
102272@@ -1537,6 +1665,11 @@ void *vmap(struct page **pages, unsigned int count,
102273 if (count > totalram_pages)
102274 return NULL;
102275
102276+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
102277+ if (!(pgprot_val(prot) & _PAGE_NX))
102278+ flags |= VM_KERNEXEC;
102279+#endif
102280+
102281 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
102282 __builtin_return_address(0));
102283 if (!area)
102284@@ -1641,6 +1774,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
102285 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
102286 goto fail;
102287
102288+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
102289+ if (!(pgprot_val(prot) & _PAGE_NX)) {
102290+ vm_flags |= VM_KERNEXEC;
102291+ start = VMALLOC_START;
102292+ end = VMALLOC_END;
102293+ }
102294+#endif
102295+
102296 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
102297 vm_flags, start, end, node, gfp_mask, caller);
102298 if (!area)
102299@@ -1817,10 +1958,9 @@ EXPORT_SYMBOL(vzalloc_node);
102300 * For tight control over page level allocator and protection flags
102301 * use __vmalloc() instead.
102302 */
102303-
102304 void *vmalloc_exec(unsigned long size)
102305 {
102306- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
102307+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
102308 NUMA_NO_NODE, __builtin_return_address(0));
102309 }
102310
102311@@ -2127,6 +2267,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
102312 {
102313 struct vm_struct *area;
102314
102315+ BUG_ON(vma->vm_mirror);
102316+
102317 size = PAGE_ALIGN(size);
102318
102319 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
102320@@ -2609,7 +2751,11 @@ static int s_show(struct seq_file *m, void *p)
102321 v->addr, v->addr + v->size, v->size);
102322
102323 if (v->caller)
102324+#ifdef CONFIG_GRKERNSEC_HIDESYM
102325+ seq_printf(m, " %pK", v->caller);
102326+#else
102327 seq_printf(m, " %pS", v->caller);
102328+#endif
102329
102330 if (v->nr_pages)
102331 seq_printf(m, " pages=%d", v->nr_pages);
102332diff --git a/mm/vmstat.c b/mm/vmstat.c
102333index 4f5cd97..9fb715a 100644
102334--- a/mm/vmstat.c
102335+++ b/mm/vmstat.c
102336@@ -27,6 +27,7 @@
102337 #include <linux/mm_inline.h>
102338 #include <linux/page_ext.h>
102339 #include <linux/page_owner.h>
102340+#include <linux/grsecurity.h>
102341
102342 #include "internal.h"
102343
102344@@ -86,7 +87,7 @@ void vm_events_fold_cpu(int cpu)
102345 *
102346 * vm_stat contains the global counters
102347 */
102348-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
102349+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
102350 EXPORT_SYMBOL(vm_stat);
102351
102352 #ifdef CONFIG_SMP
102353@@ -438,7 +439,7 @@ static int fold_diff(int *diff)
102354
102355 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
102356 if (diff[i]) {
102357- atomic_long_add(diff[i], &vm_stat[i]);
102358+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
102359 changes++;
102360 }
102361 return changes;
102362@@ -476,7 +477,7 @@ static int refresh_cpu_vm_stats(void)
102363 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
102364 if (v) {
102365
102366- atomic_long_add(v, &zone->vm_stat[i]);
102367+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
102368 global_diff[i] += v;
102369 #ifdef CONFIG_NUMA
102370 /* 3 seconds idle till flush */
102371@@ -540,7 +541,7 @@ void cpu_vm_stats_fold(int cpu)
102372
102373 v = p->vm_stat_diff[i];
102374 p->vm_stat_diff[i] = 0;
102375- atomic_long_add(v, &zone->vm_stat[i]);
102376+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
102377 global_diff[i] += v;
102378 }
102379 }
102380@@ -560,8 +561,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
102381 if (pset->vm_stat_diff[i]) {
102382 int v = pset->vm_stat_diff[i];
102383 pset->vm_stat_diff[i] = 0;
102384- atomic_long_add(v, &zone->vm_stat[i]);
102385- atomic_long_add(v, &vm_stat[i]);
102386+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
102387+ atomic_long_add_unchecked(v, &vm_stat[i]);
102388 }
102389 }
102390 #endif
102391@@ -1293,10 +1294,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
102392 stat_items_size += sizeof(struct vm_event_state);
102393 #endif
102394
102395- v = kmalloc(stat_items_size, GFP_KERNEL);
102396+ v = kzalloc(stat_items_size, GFP_KERNEL);
102397 m->private = v;
102398 if (!v)
102399 return ERR_PTR(-ENOMEM);
102400+
102401+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102402+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
102403+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
102404+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
102405+ && !in_group_p(grsec_proc_gid)
102406+#endif
102407+ )
102408+ return (unsigned long *)m->private + *pos;
102409+#endif
102410+#endif
102411+
102412 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
102413 v[i] = global_page_state(i);
102414 v += NR_VM_ZONE_STAT_ITEMS;
102415@@ -1528,10 +1541,16 @@ static int __init setup_vmstat(void)
102416 cpu_notifier_register_done();
102417 #endif
102418 #ifdef CONFIG_PROC_FS
102419- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
102420- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
102421- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
102422- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
102423+ {
102424+ mode_t gr_mode = S_IRUGO;
102425+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102426+ gr_mode = S_IRUSR;
102427+#endif
102428+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
102429+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
102430+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
102431+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
102432+ }
102433 #endif
102434 return 0;
102435 }
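The vm_stat conversion belongs to PaX's overflow-checking scheme: atomic_long_t arithmetic traps on wraparound, so pure statistics that may legitimately grow without bound are moved to the *_unchecked variants to opt out. A single-threaded userspace analogue of the split, assuming GCC/Clang's __builtin_add_overflow (atomicity omitted for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { long v; } atomic_long_t;                /* checked */
    typedef struct { unsigned long v; } atomic_long_unchecked_t;

    static void atomic_long_add(atomic_long_t *a, long d)
    {
        long r;
        if (__builtin_add_overflow(a->v, d, &r)) {
            /* the kernel feature logs and contains the overflow;
             * abort here just to make the trap visible */
            fprintf(stderr, "refcount-style overflow detected\n");
            abort();
        }
        a->v = r;
    }

    static void atomic_long_add_unchecked(atomic_long_unchecked_t *a, long d)
    {
        a->v += (unsigned long)d;      /* statistics may wrap silently */
    }

    int main(void)
    {
        atomic_long_unchecked_t stat = { 0 };
        atomic_long_add_unchecked(&stat, 42);
        printf("%lu\n", stat.v);
        return 0;
    }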
102436diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
102437index 64c6bed..b79a5de 100644
102438--- a/net/8021q/vlan.c
102439+++ b/net/8021q/vlan.c
102440@@ -481,7 +481,7 @@ out:
102441 return NOTIFY_DONE;
102442 }
102443
102444-static struct notifier_block vlan_notifier_block __read_mostly = {
102445+static struct notifier_block vlan_notifier_block = {
102446 .notifier_call = vlan_device_event,
102447 };
102448
102449@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
102450 err = -EPERM;
102451 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
102452 break;
102453- if ((args.u.name_type >= 0) &&
102454- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
102455+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
102456 struct vlan_net *vn;
102457
102458 vn = net_generic(net, vlan_net_id);
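The half of the vlan_ioctl_handler condition dropped above was a tautology: args.u.name_type is unsigned, so `>= 0` always holds and only the upper bound does any work. Compilers can diagnose exactly this, e.g.:

    #include <stdio.h>

    int main(void)
    {
        unsigned int name_type = 0;
        /* gcc -Wtype-limits: "comparison of unsigned expression
         * >= 0 is always true" */
        if (name_type >= 0 && name_type < 4)
            puts("the lower bound never rejects anything");
        return 0;
    }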
102459diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
102460index c92b52f..006c052 100644
102461--- a/net/8021q/vlan_netlink.c
102462+++ b/net/8021q/vlan_netlink.c
102463@@ -245,7 +245,7 @@ static struct net *vlan_get_link_net(const struct net_device *dev)
102464 return dev_net(real_dev);
102465 }
102466
102467-struct rtnl_link_ops vlan_link_ops __read_mostly = {
102468+struct rtnl_link_ops vlan_link_ops = {
102469 .kind = "vlan",
102470 .maxtype = IFLA_VLAN_MAX,
102471 .policy = vlan_policy,
102472diff --git a/net/9p/client.c b/net/9p/client.c
102473index e86a9bea..e91f70e 100644
102474--- a/net/9p/client.c
102475+++ b/net/9p/client.c
102476@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
102477 len - inline_len);
102478 } else {
102479 err = copy_from_user(ename + inline_len,
102480- uidata, len - inline_len);
102481+ (char __force_user *)uidata, len - inline_len);
102482 if (err) {
102483 err = -EFAULT;
102484 goto out_err;
102485@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
102486 kernel_buf = 1;
102487 indata = data;
102488 } else
102489- indata = (__force char *)udata;
102490+ indata = (__force_kernel char *)udata;
102491 /*
102492 * response header len is 11
102493 * PDU Header(7) + IO Size (4)
102494@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
102495 kernel_buf = 1;
102496 odata = data;
102497 } else
102498- odata = (char *)udata;
102499+ odata = (char __force_kernel *)udata;
102500 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
102501 P9_ZC_HDR_SZ, kernel_buf, "dqd",
102502 fid->fid, offset, rsize);
102503diff --git a/net/9p/mod.c b/net/9p/mod.c
102504index 6ab36ae..6f1841b 100644
102505--- a/net/9p/mod.c
102506+++ b/net/9p/mod.c
102507@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
102508 void v9fs_register_trans(struct p9_trans_module *m)
102509 {
102510 spin_lock(&v9fs_trans_lock);
102511- list_add_tail(&m->list, &v9fs_trans_list);
102512+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
102513 spin_unlock(&v9fs_trans_lock);
102514 }
102515 EXPORT_SYMBOL(v9fs_register_trans);
102516@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
102517 void v9fs_unregister_trans(struct p9_trans_module *m)
102518 {
102519 spin_lock(&v9fs_trans_lock);
102520- list_del_init(&m->list);
102521+ pax_list_del_init((struct list_head *)&m->list);
102522 spin_unlock(&v9fs_trans_lock);
102523 }
102524 EXPORT_SYMBOL(v9fs_unregister_trans);
102525diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
102526index 80d08f6..de63fd1 100644
102527--- a/net/9p/trans_fd.c
102528+++ b/net/9p/trans_fd.c
102529@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
102530 oldfs = get_fs();
102531 set_fs(get_ds());
102532 /* The cast to a user pointer is valid due to the set_fs() */
102533- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
102534+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
102535 set_fs(oldfs);
102536
102537 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
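The 9p hunks replace generic `__force` + `__user` casts with grsecurity's direction-specific `__force_user`/`__force_kernel` spellings; both build on sparse address-space annotations, which compile away outside sparse's __CHECKER__ mode. A compilable sketch of the idea (the __force_user definition below is an assumption for illustration):

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    /* assumed: grsecurity's direction-specific spelling */
    #define __force_user __force __user

    static long fake_vfs_write(int fd, const char __user *buf,
                               unsigned long len)
    {
        (void)fd; (void)buf;
        return (long)len;
    }

    int main(void)
    {
        char kbuf[8] = "hi";
        /* after a set_fs(KERNEL_DS)-style switch, handing a kernel
         * buffer to a user-pointer parameter is legitimate; the
         * forced cast documents it and silences sparse */
        return fake_vfs_write(1, (const char __force_user *)kbuf, 2) == 2
                   ? 0 : 1;
    }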
102538diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
102539index af46bc4..f9adfcd 100644
102540--- a/net/appletalk/atalk_proc.c
102541+++ b/net/appletalk/atalk_proc.c
102542@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
102543 struct proc_dir_entry *p;
102544 int rc = -ENOMEM;
102545
102546- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
102547+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
102548 if (!atalk_proc_dir)
102549 goto out;
102550
102551diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
102552index 876fbe8..8bbea9f 100644
102553--- a/net/atm/atm_misc.c
102554+++ b/net/atm/atm_misc.c
102555@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
102556 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
102557 return 1;
102558 atm_return(vcc, truesize);
102559- atomic_inc(&vcc->stats->rx_drop);
102560+ atomic_inc_unchecked(&vcc->stats->rx_drop);
102561 return 0;
102562 }
102563 EXPORT_SYMBOL(atm_charge);
102564@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
102565 }
102566 }
102567 atm_return(vcc, guess);
102568- atomic_inc(&vcc->stats->rx_drop);
102569+ atomic_inc_unchecked(&vcc->stats->rx_drop);
102570 return NULL;
102571 }
102572 EXPORT_SYMBOL(atm_alloc_charge);
102573@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
102574
102575 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
102576 {
102577-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
102578+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
102579 __SONET_ITEMS
102580 #undef __HANDLE_ITEM
102581 }
102582@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
102583
102584 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
102585 {
102586-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
102587+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
102588 __SONET_ITEMS
102589 #undef __HANDLE_ITEM
102590 }
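sonet_copy_stats()/sonet_subtract_stats() show why the patch only needs to edit one line per function: the stat fields are an X-macro list, so redefining __HANDLE_ITEM changes every expansion at once. A self-contained sketch of the same pattern with hypothetical fields:

    #include <stdio.h>

    /* the item list is written once; each helper redefines
     * __HANDLE_ITEM and re-expands it */
    #define STAT_ITEMS      \
        __HANDLE_ITEM(tx);  \
        __HANDLE_ITEM(rx);  \
        __HANDLE_ITEM(rx_drop);

    struct k_stats { long tx, rx, rx_drop; };   /* kernel-side */
    struct u_stats { long tx, rx, rx_drop; };   /* user-visible */

    static void copy_stats(const struct k_stats *from, struct u_stats *to)
    {
    #define __HANDLE_ITEM(i) to->i = from->i
        STAT_ITEMS
    #undef __HANDLE_ITEM
    }

    static void subtract_stats(struct k_stats *from, const struct u_stats *to)
    {
    #define __HANDLE_ITEM(i) from->i -= to->i
        STAT_ITEMS
    #undef __HANDLE_ITEM
    }

    int main(void)
    {
        struct k_stats k = { 5, 7, 1 };
        struct u_stats u;

        copy_stats(&k, &u);
        subtract_stats(&k, &u);
        printf("%ld %ld %ld\n", k.tx, k.rx, k.rx_drop);   /* 0 0 0 */
        return 0;
    }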
102591diff --git a/net/atm/lec.c b/net/atm/lec.c
102592index 4b98f89..5a2f6cb 100644
102593--- a/net/atm/lec.c
102594+++ b/net/atm/lec.c
102595@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
102596 }
102597
102598 static struct lane2_ops lane2_ops = {
102599- lane2_resolve, /* resolve, spec 3.1.3 */
102600- lane2_associate_req, /* associate_req, spec 3.1.4 */
102601- NULL /* associate indicator, spec 3.1.5 */
102602+ .resolve = lane2_resolve,
102603+ .associate_req = lane2_associate_req,
102604+ .associate_indicator = NULL
102605 };
102606
102607 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
102608diff --git a/net/atm/lec.h b/net/atm/lec.h
102609index 4149db1..f2ab682 100644
102610--- a/net/atm/lec.h
102611+++ b/net/atm/lec.h
102612@@ -48,7 +48,7 @@ struct lane2_ops {
102613 const u8 *tlvs, u32 sizeoftlvs);
102614 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
102615 const u8 *tlvs, u32 sizeoftlvs);
102616-};
102617+} __no_const;
102618
102619 /*
102620 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
102621diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
102622index d1b2d9a..d549f7f 100644
102623--- a/net/atm/mpoa_caches.c
102624+++ b/net/atm/mpoa_caches.c
102625@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
102626
102627
102628 static struct in_cache_ops ingress_ops = {
102629- in_cache_add_entry, /* add_entry */
102630- in_cache_get, /* get */
102631- in_cache_get_with_mask, /* get_with_mask */
102632- in_cache_get_by_vcc, /* get_by_vcc */
102633- in_cache_put, /* put */
102634- in_cache_remove_entry, /* remove_entry */
102635- cache_hit, /* cache_hit */
102636- clear_count_and_expired, /* clear_count */
102637- check_resolving_entries, /* check_resolving */
102638- refresh_entries, /* refresh */
102639- in_destroy_cache /* destroy_cache */
102640+ .add_entry = in_cache_add_entry,
102641+ .get = in_cache_get,
102642+ .get_with_mask = in_cache_get_with_mask,
102643+ .get_by_vcc = in_cache_get_by_vcc,
102644+ .put = in_cache_put,
102645+ .remove_entry = in_cache_remove_entry,
102646+ .cache_hit = cache_hit,
102647+ .clear_count = clear_count_and_expired,
102648+ .check_resolving = check_resolving_entries,
102649+ .refresh = refresh_entries,
102650+ .destroy_cache = in_destroy_cache
102651 };
102652
102653 static struct eg_cache_ops egress_ops = {
102654- eg_cache_add_entry, /* add_entry */
102655- eg_cache_get_by_cache_id, /* get_by_cache_id */
102656- eg_cache_get_by_tag, /* get_by_tag */
102657- eg_cache_get_by_vcc, /* get_by_vcc */
102658- eg_cache_get_by_src_ip, /* get_by_src_ip */
102659- eg_cache_put, /* put */
102660- eg_cache_remove_entry, /* remove_entry */
102661- update_eg_cache_entry, /* update */
102662- clear_expired, /* clear_expired */
102663- eg_destroy_cache /* destroy_cache */
102664+ .add_entry = eg_cache_add_entry,
102665+ .get_by_cache_id = eg_cache_get_by_cache_id,
102666+ .get_by_tag = eg_cache_get_by_tag,
102667+ .get_by_vcc = eg_cache_get_by_vcc,
102668+ .get_by_src_ip = eg_cache_get_by_src_ip,
102669+ .put = eg_cache_put,
102670+ .remove_entry = eg_cache_remove_entry,
102671+ .update = update_eg_cache_entry,
102672+ .clear_expired = clear_expired,
102673+ .destroy_cache = eg_destroy_cache
102674 };
102675
102676
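The lane2_ops and cache-ops conversions above swap positional initializers for designated ones; besides suiting the constification work, designated form is safer regardless, since each value is bound to a named member instead of depending on declaration order. A small sketch:

    #include <stdio.h>

    struct cache_ops {
        int  (*add_entry)(int);
        int  (*get)(int);
        void (*put)(int);
    };

    static int  my_add(int x) { return x + 1; }
    static int  my_get(int x) { return x; }
    static void my_put(int x) { (void)x; }

    /* positional: silently breaks if members are reordered or added */
    static const struct cache_ops positional = { my_add, my_get, my_put };

    /* designated: the compiler catches mismatches when the struct
     * layout changes */
    static const struct cache_ops designated = {
        .add_entry = my_add,
        .get       = my_get,
        .put       = my_put,
    };

    int main(void)
    {
        printf("%d %d\n", positional.add_entry(1), designated.get(2));
        return 0;
    }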
102677diff --git a/net/atm/proc.c b/net/atm/proc.c
102678index bbb6461..cf04016 100644
102679--- a/net/atm/proc.c
102680+++ b/net/atm/proc.c
102681@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
102682 const struct k_atm_aal_stats *stats)
102683 {
102684 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
102685- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
102686- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
102687- atomic_read(&stats->rx_drop));
102688+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
102689+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
102690+ atomic_read_unchecked(&stats->rx_drop));
102691 }
102692
102693 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
102694diff --git a/net/atm/resources.c b/net/atm/resources.c
102695index 0447d5d..3cf4728 100644
102696--- a/net/atm/resources.c
102697+++ b/net/atm/resources.c
102698@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
102699 static void copy_aal_stats(struct k_atm_aal_stats *from,
102700 struct atm_aal_stats *to)
102701 {
102702-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
102703+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
102704 __AAL_STAT_ITEMS
102705 #undef __HANDLE_ITEM
102706 }
102707@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
102708 static void subtract_aal_stats(struct k_atm_aal_stats *from,
102709 struct atm_aal_stats *to)
102710 {
102711-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
102712+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
102713 __AAL_STAT_ITEMS
102714 #undef __HANDLE_ITEM
102715 }
102716diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
102717index 919a5ce..cc6b444 100644
102718--- a/net/ax25/sysctl_net_ax25.c
102719+++ b/net/ax25/sysctl_net_ax25.c
102720@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
102721 {
102722 char path[sizeof("net/ax25/") + IFNAMSIZ];
102723 int k;
102724- struct ctl_table *table;
102725+ ctl_table_no_const *table;
102726
102727 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
102728 if (!table)
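ctl_table_no_const exists because the constify plugin makes `struct ctl_table` read-only; tables duplicated at runtime with kmemdup() and then patched per device need a layout-identical writable twin. A userspace sketch of that duplicate-then-patch pattern (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ctl_entry { const char *name; int value; };  /* "constified" */
    typedef struct { const char *name; int value; }
            ctl_entry_no_const;                         /* writable twin */

    static const struct ctl_entry ax25_template[2] = {
        { "t1_timeout", 10 },
        { "t2_timeout", 3 },
    };

    int main(void)
    {
        /* kmemdup()-style: duplicate the template, patch the copy */
        ctl_entry_no_const *t = malloc(sizeof(ax25_template));
        if (!t)
            return 1;
        memcpy(t, ax25_template, sizeof(ax25_template));
        t[0].value = 20;              /* fine: the copy is writable */
        printf("%s=%d\n", t[0].name, t[0].value);
        free(t);
        return 0;
    }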
102729diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
102730index 00e00e0..710fcd2 100644
102731--- a/net/batman-adv/bat_iv_ogm.c
102732+++ b/net/batman-adv/bat_iv_ogm.c
102733@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
102734
102735 /* randomize initial seqno to avoid collision */
102736 get_random_bytes(&random_seqno, sizeof(random_seqno));
102737- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
102738+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
102739
102740 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
102741 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
102742@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
102743 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
102744
102745 /* change sequence number to network order */
102746- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
102747+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
102748 batadv_ogm_packet->seqno = htonl(seqno);
102749- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
102750+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
102751
102752 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
102753
102754@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
102755 return;
102756
102757 /* could be changed by schedule_own_packet() */
102758- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
102759+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
102760
102761 if (ogm_packet->flags & BATADV_DIRECTLINK)
102762 has_directlink_flag = true;
102763diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
102764index 3d1dcaa..4699f4e 100644
102765--- a/net/batman-adv/fragmentation.c
102766+++ b/net/batman-adv/fragmentation.c
102767@@ -449,7 +449,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
102768 frag_header.packet_type = BATADV_UNICAST_FRAG;
102769 frag_header.version = BATADV_COMPAT_VERSION;
102770 frag_header.ttl = BATADV_TTL;
102771- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
102772+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
102773 frag_header.reserved = 0;
102774 frag_header.no = 0;
102775 frag_header.total_size = htons(skb->len);
102776diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
102777index 5ec31d7..e371631 100644
102778--- a/net/batman-adv/soft-interface.c
102779+++ b/net/batman-adv/soft-interface.c
102780@@ -295,7 +295,7 @@ send:
102781 primary_if->net_dev->dev_addr);
102782
102783 /* set broadcast sequence number */
102784- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
102785+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
102786 bcast_packet->seqno = htonl(seqno);
102787
102788 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
102789@@ -760,7 +760,7 @@ static int batadv_softif_init_late(struct net_device *dev)
102790 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
102791
102792 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
102793- atomic_set(&bat_priv->bcast_seqno, 1);
102794+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
102795 atomic_set(&bat_priv->tt.vn, 0);
102796 atomic_set(&bat_priv->tt.local_changes, 0);
102797 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
102798@@ -774,7 +774,7 @@ static int batadv_softif_init_late(struct net_device *dev)
102799
102800 /* randomize initial seqno to avoid collision */
102801 get_random_bytes(&random_seqno, sizeof(random_seqno));
102802- atomic_set(&bat_priv->frag_seqno, random_seqno);
102803+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
102804
102805 bat_priv->primary_if = NULL;
102806 bat_priv->num_ifaces = 0;
102807@@ -982,7 +982,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
102808 return 0;
102809 }
102810
102811-struct rtnl_link_ops batadv_link_ops __read_mostly = {
102812+struct rtnl_link_ops batadv_link_ops = {
102813 .kind = "batadv",
102814 .priv_size = sizeof(struct batadv_priv),
102815 .setup = batadv_softif_init_early,
102816diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
102817index 9398c3f..0e79657 100644
102818--- a/net/batman-adv/types.h
102819+++ b/net/batman-adv/types.h
102820@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
102821 struct batadv_hard_iface_bat_iv {
102822 unsigned char *ogm_buff;
102823 int ogm_buff_len;
102824- atomic_t ogm_seqno;
102825+ atomic_unchecked_t ogm_seqno;
102826 };
102827
102828 /**
102829@@ -766,7 +766,7 @@ struct batadv_priv {
102830 atomic_t bonding;
102831 atomic_t fragmentation;
102832 atomic_t packet_size_max;
102833- atomic_t frag_seqno;
102834+ atomic_unchecked_t frag_seqno;
102835 #ifdef CONFIG_BATMAN_ADV_BLA
102836 atomic_t bridge_loop_avoidance;
102837 #endif
102838@@ -785,7 +785,7 @@ struct batadv_priv {
102839 #endif
102840 uint32_t isolation_mark;
102841 uint32_t isolation_mark_mask;
102842- atomic_t bcast_seqno;
102843+ atomic_unchecked_t bcast_seqno;
102844 atomic_t bcast_queue_left;
102845 atomic_t batman_queue_left;
102846 char num_ifaces;
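Every batman-adv counter converted here is a protocol sequence number that is expected to wrap, which is precisely why it becomes atomic_unchecked_t under the overflow-checking scheme: wraparound is by design, not a bug. Consumers of such counters compare them with modular arithmetic; the usual idiom (assuming two's-complement conversion, true on all supported targets):

    #include <stdint.h>
    #include <stdio.h>

    /* "a is newer than b" under 32-bit wraparound */
    static int seq_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
        printf("%d\n", seq_after(5, UINT32_MAX));  /* 1: 5 follows a wrap */
        printf("%d\n", seq_after(UINT32_MAX, 5));  /* 0 */
        return 0;
    }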
102847diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
102848index 1d65c5b..43e55fd 100644
102849--- a/net/bluetooth/hci_sock.c
102850+++ b/net/bluetooth/hci_sock.c
102851@@ -1042,7 +1042,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
102852 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
102853 }
102854
102855- len = min_t(unsigned int, len, sizeof(uf));
102856+ len = min((size_t)len, sizeof(uf));
102857 if (copy_from_user(&uf, optval, len)) {
102858 err = -EFAULT;
102859 break;
102860diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
102861index 6ba33f9..4afc26f 100644
102862--- a/net/bluetooth/l2cap_core.c
102863+++ b/net/bluetooth/l2cap_core.c
102864@@ -3534,8 +3534,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
102865 break;
102866
102867 case L2CAP_CONF_RFC:
102868- if (olen == sizeof(rfc))
102869- memcpy(&rfc, (void *)val, olen);
102870+ if (olen != sizeof(rfc))
102871+ break;
102872+
102873+ memcpy(&rfc, (void *)val, olen);
102874
102875 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
102876 rfc.mode != chan->mode)
102877diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
102878index 60694f0..32623ed 100644
102879--- a/net/bluetooth/l2cap_sock.c
102880+++ b/net/bluetooth/l2cap_sock.c
102881@@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
102882 struct sock *sk = sock->sk;
102883 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
102884 struct l2cap_options opts;
102885- int len, err = 0;
102886+ int err = 0;
102887+ size_t len = optlen;
102888 u32 opt;
102889
102890 BT_DBG("sk %p", sk);
102891@@ -660,7 +661,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
102892 opts.max_tx = chan->max_tx;
102893 opts.txwin_size = chan->tx_win;
102894
102895- len = min_t(unsigned int, sizeof(opts), optlen);
102896+ len = min(sizeof(opts), len);
102897 if (copy_from_user((char *) &opts, optval, len)) {
102898 err = -EFAULT;
102899 break;
102900@@ -747,7 +748,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
102901 struct bt_security sec;
102902 struct bt_power pwr;
102903 struct l2cap_conn *conn;
102904- int len, err = 0;
102905+ int err = 0;
102906+ size_t len = optlen;
102907 u32 opt;
102908
102909 BT_DBG("sk %p", sk);
102910@@ -771,7 +773,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
102911
102912 sec.level = BT_SECURITY_LOW;
102913
102914- len = min_t(unsigned int, sizeof(sec), optlen);
102915+ len = min(sizeof(sec), len);
102916 if (copy_from_user((char *) &sec, optval, len)) {
102917 err = -EFAULT;
102918 break;
102919@@ -867,7 +869,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
102920
102921 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
102922
102923- len = min_t(unsigned int, sizeof(pwr), optlen);
102924+ len = min(sizeof(pwr), len);
102925 if (copy_from_user((char *) &pwr, optval, len)) {
102926 err = -EFAULT;
102927 break;
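The sockopt hunks replace `int len` plus `min_t(unsigned int, ...)` with a size_t length and a plain min(). min_t() casts both operands to the named type, and a cast to a narrower type can silently corrupt the clamp; keeping the full width avoids that. A sketch on an LP64 target (min_t simplified, without the real macro's single-evaluation guarantees):

    #include <stdio.h>

    /* simplified; the real macro also guards against double evaluation */
    #define min_t(type, x, y) \
        ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

    int main(void)
    {
        unsigned long optlen = 0x100000000UL;   /* > UINT_MAX on LP64 */

        /* the cast truncates optlen to 0, so the "minimum" comes out
         * as 0 instead of 16 */
        printf("%u\n", min_t(unsigned int, optlen, 16UL));

        /* keeping full width gets it right */
        unsigned long len = optlen < 16UL ? optlen : 16UL;
        printf("%lu\n", len);                   /* 16 */
        return 0;
    }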
102928diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
102929index 3c6d2c8..6afc970 100644
102930--- a/net/bluetooth/rfcomm/sock.c
102931+++ b/net/bluetooth/rfcomm/sock.c
102932@@ -686,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
102933 struct sock *sk = sock->sk;
102934 struct bt_security sec;
102935 int err = 0;
102936- size_t len;
102937+ size_t len = optlen;
102938 u32 opt;
102939
102940 BT_DBG("sk %p", sk);
102941@@ -708,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
102942
102943 sec.level = BT_SECURITY_LOW;
102944
102945- len = min_t(unsigned int, sizeof(sec), optlen);
102946+ len = min(sizeof(sec), len);
102947 if (copy_from_user((char *) &sec, optval, len)) {
102948 err = -EFAULT;
102949 break;
102950diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
102951index 8e385a0..a5bdd8e 100644
102952--- a/net/bluetooth/rfcomm/tty.c
102953+++ b/net/bluetooth/rfcomm/tty.c
102954@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
102955 BT_DBG("tty %p id %d", tty, tty->index);
102956
102957 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
102958- dev->channel, dev->port.count);
102959+ dev->channel, atomic_read(&dev->port.count));
102960
102961 err = tty_port_open(&dev->port, tty, filp);
102962 if (err)
102963@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
102964 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
102965
102966 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
102967- dev->port.count);
102968+ atomic_read(&dev->port.count));
102969
102970 tty_port_close(&dev->port, tty, filp);
102971 }
102972diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
102973index 4fbcea0..69a6786 100644
102974--- a/net/bridge/br_netlink.c
102975+++ b/net/bridge/br_netlink.c
102976@@ -726,7 +726,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
102977 .get_link_af_size = br_get_link_af_size,
102978 };
102979
102980-struct rtnl_link_ops br_link_ops __read_mostly = {
102981+struct rtnl_link_ops br_link_ops = {
102982 .kind = "bridge",
102983 .priv_size = sizeof(struct net_bridge),
102984 .setup = br_dev_setup,
102985diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
102986index 91180a7..1301daa 100644
102987--- a/net/bridge/netfilter/ebtables.c
102988+++ b/net/bridge/netfilter/ebtables.c
102989@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102990 tmp.valid_hooks = t->table->valid_hooks;
102991 }
102992 mutex_unlock(&ebt_mutex);
102993- if (copy_to_user(user, &tmp, *len) != 0) {
102994+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
102995 BUGPRINT("c2u Didn't work\n");
102996 ret = -EFAULT;
102997 break;
102998@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
102999 goto out;
103000 tmp.valid_hooks = t->valid_hooks;
103001
103002- if (copy_to_user(user, &tmp, *len) != 0) {
103003+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
103004 ret = -EFAULT;
103005 break;
103006 }
103007@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
103008 tmp.entries_size = t->table->entries_size;
103009 tmp.valid_hooks = t->table->valid_hooks;
103010
103011- if (copy_to_user(user, &tmp, *len) != 0) {
103012+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
103013 ret = -EFAULT;
103014 break;
103015 }
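All three ebtables hunks bound the user-requested length by sizeof(tmp) before copy_to_user(); without that check a caller controlling *len could make the kernel copy past the on-stack structure and leak adjacent stack memory. The shape of the fix, sketched in userspace:

    #include <stdio.h>
    #include <string.h>

    struct info { char name[8]; unsigned int entries; };

    /* never copy out more than the source object holds, no matter
     * what length the caller asked for */
    static int copy_info(void *dst, size_t want, const struct info *src)
    {
        if (want > sizeof(*src))
            return -1;                 /* -EFAULT in the kernel version */
        memcpy(dst, src, want);        /* stands in for copy_to_user() */
        return 0;
    }

    int main(void)
    {
        struct info tmp = { "ebt", 4 };
        char buf[64];

        printf("%d\n", copy_info(buf, sizeof(buf), &tmp)); /* -1 */
        printf("%d\n", copy_info(buf, sizeof(tmp), &tmp)); /*  0 */
        return 0;
    }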
103016diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
103017index f5afda1..dcf770a 100644
103018--- a/net/caif/cfctrl.c
103019+++ b/net/caif/cfctrl.c
103020@@ -10,6 +10,7 @@
103021 #include <linux/spinlock.h>
103022 #include <linux/slab.h>
103023 #include <linux/pkt_sched.h>
103024+#include <linux/sched.h>
103025 #include <net/caif/caif_layer.h>
103026 #include <net/caif/cfpkt.h>
103027 #include <net/caif/cfctrl.h>
103028@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
103029 memset(&dev_info, 0, sizeof(dev_info));
103030 dev_info.id = 0xff;
103031 cfsrvl_init(&this->serv, 0, &dev_info, false);
103032- atomic_set(&this->req_seq_no, 1);
103033- atomic_set(&this->rsp_seq_no, 1);
103034+ atomic_set_unchecked(&this->req_seq_no, 1);
103035+ atomic_set_unchecked(&this->rsp_seq_no, 1);
103036 this->serv.layer.receive = cfctrl_recv;
103037 sprintf(this->serv.layer.name, "ctrl");
103038 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
103039@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
103040 struct cfctrl_request_info *req)
103041 {
103042 spin_lock_bh(&ctrl->info_list_lock);
103043- atomic_inc(&ctrl->req_seq_no);
103044- req->sequence_no = atomic_read(&ctrl->req_seq_no);
103045+ atomic_inc_unchecked(&ctrl->req_seq_no);
103046+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
103047 list_add_tail(&req->list, &ctrl->list);
103048 spin_unlock_bh(&ctrl->info_list_lock);
103049 }
103050@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
103051 if (p != first)
103052 pr_warn("Requests are not received in order\n");
103053
103054- atomic_set(&ctrl->rsp_seq_no,
103055+ atomic_set_unchecked(&ctrl->rsp_seq_no,
103056 p->sequence_no);
103057 list_del(&p->list);
103058 goto out;
103059diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
103060index 67a4a36..8d28068 100644
103061--- a/net/caif/chnl_net.c
103062+++ b/net/caif/chnl_net.c
103063@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
103064 };
103065
103066
103067-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
103068+static struct rtnl_link_ops ipcaif_link_ops = {
103069 .kind = "caif",
103070 .priv_size = sizeof(struct chnl_net),
103071 .setup = ipcaif_net_setup,
103072diff --git a/net/can/af_can.c b/net/can/af_can.c
103073index 32d710e..93bcf05 100644
103074--- a/net/can/af_can.c
103075+++ b/net/can/af_can.c
103076@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
103077 };
103078
103079 /* notifier block for netdevice event */
103080-static struct notifier_block can_netdev_notifier __read_mostly = {
103081+static struct notifier_block can_netdev_notifier = {
103082 .notifier_call = can_notifier,
103083 };
103084
103085diff --git a/net/can/bcm.c b/net/can/bcm.c
103086index ee9ffd9..dfdf3d4 100644
103087--- a/net/can/bcm.c
103088+++ b/net/can/bcm.c
103089@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
103090 }
103091
103092 /* create /proc/net/can-bcm directory */
103093- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
103094+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
103095 return 0;
103096 }
103097
103098diff --git a/net/can/gw.c b/net/can/gw.c
103099index a6f448e..5902171 100644
103100--- a/net/can/gw.c
103101+++ b/net/can/gw.c
103102@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
103103 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
103104
103105 static HLIST_HEAD(cgw_list);
103106-static struct notifier_block notifier;
103107
103108 static struct kmem_cache *cgw_cache __read_mostly;
103109
103110@@ -948,6 +947,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
103111 return err;
103112 }
103113
103114+static struct notifier_block notifier = {
103115+ .notifier_call = cgw_notifier
103116+};
103117+
103118 static __init int cgw_module_init(void)
103119 {
103120 /* sanitize given module parameter */
103121@@ -963,7 +966,6 @@ static __init int cgw_module_init(void)
103122 return -ENOMEM;
103123
103124 /* set notifier */
103125- notifier.notifier_call = cgw_notifier;
103126 register_netdevice_notifier(&notifier);
103127
103128 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
103129diff --git a/net/can/proc.c b/net/can/proc.c
103130index 1a19b98..df2b4ec 100644
103131--- a/net/can/proc.c
103132+++ b/net/can/proc.c
103133@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
103134 void can_init_proc(void)
103135 {
103136 /* create /proc/net/can directory */
103137- can_dir = proc_mkdir("can", init_net.proc_net);
103138+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
103139
103140 if (!can_dir) {
103141 printk(KERN_INFO "can: failed to create /proc/net/can . "
103142diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
103143index a9f4ae4..ee19b92 100644
103144--- a/net/ceph/messenger.c
103145+++ b/net/ceph/messenger.c
103146@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
103147 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
103148
103149 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
103150-static atomic_t addr_str_seq = ATOMIC_INIT(0);
103151+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
103152
103153 static struct page *zero_page; /* used in certain error cases */
103154
103155@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
103156 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
103157 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
103158
103159- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
103160+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
103161 s = addr_str[i];
103162
103163 switch (ss->ss_family) {
103164diff --git a/net/compat.c b/net/compat.c
103165index f7bd286..76ea56a 100644
103166--- a/net/compat.c
103167+++ b/net/compat.c
103168@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
103169
103170 #define CMSG_COMPAT_FIRSTHDR(msg) \
103171 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
103172- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
103173+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
103174 (struct compat_cmsghdr __user *)NULL)
103175
103176 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
103177 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
103178 (ucmlen) <= (unsigned long) \
103179 ((mhdr)->msg_controllen - \
103180- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
103181+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
103182
103183 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
103184 struct compat_cmsghdr __user *cmsg, int cmsg_len)
103185 {
103186 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
103187- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
103188+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
103189 msg->msg_controllen)
103190 return NULL;
103191 return (struct compat_cmsghdr __user *)ptr;
103192@@ -203,7 +203,7 @@ Efault:
103193
103194 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
103195 {
103196- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
103197+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
103198 struct compat_cmsghdr cmhdr;
103199 struct compat_timeval ctv;
103200 struct compat_timespec cts[3];
103201@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
103202
103203 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
103204 {
103205- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
103206+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
103207 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
103208 int fdnum = scm->fp->count;
103209 struct file **fp = scm->fp->fp;
103210@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
103211 return -EFAULT;
103212 old_fs = get_fs();
103213 set_fs(KERNEL_DS);
103214- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
103215+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
103216 set_fs(old_fs);
103217
103218 return err;
103219@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
103220 len = sizeof(ktime);
103221 old_fs = get_fs();
103222 set_fs(KERNEL_DS);
103223- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
103224+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
103225 set_fs(old_fs);
103226
103227 if (!err) {
103228@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
103229 case MCAST_JOIN_GROUP:
103230 case MCAST_LEAVE_GROUP:
103231 {
103232- struct compat_group_req __user *gr32 = (void *)optval;
103233+ struct compat_group_req __user *gr32 = (void __user *)optval;
103234 struct group_req __user *kgr =
103235 compat_alloc_user_space(sizeof(struct group_req));
103236 u32 interface;
103237@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
103238 case MCAST_BLOCK_SOURCE:
103239 case MCAST_UNBLOCK_SOURCE:
103240 {
103241- struct compat_group_source_req __user *gsr32 = (void *)optval;
103242+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
103243 struct group_source_req __user *kgsr = compat_alloc_user_space(
103244 sizeof(struct group_source_req));
103245 u32 interface;
103246@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
103247 }
103248 case MCAST_MSFILTER:
103249 {
103250- struct compat_group_filter __user *gf32 = (void *)optval;
103251+ struct compat_group_filter __user *gf32 = (void __user *)optval;
103252 struct group_filter __user *kgf;
103253 u32 interface, fmode, numsrc;
103254
103255@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
103256 char __user *optval, int __user *optlen,
103257 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
103258 {
103259- struct compat_group_filter __user *gf32 = (void *)optval;
103260+ struct compat_group_filter __user *gf32 = (void __user *)optval;
103261 struct group_filter __user *kgf;
103262 int __user *koptlen;
103263 u32 interface, fmode, numsrc;
103264@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
103265
103266 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
103267 return -EINVAL;
103268- if (copy_from_user(a, args, nas[call]))
103269+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
103270 return -EFAULT;
103271 a0 = a[0];
103272 a1 = a[1];
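The compat socketcall change is the same idea in the other direction: the per-call table says how many bytes to copy_from_user() into the fixed a[] array, and clamping against sizeof a means a bad table entry (or a future table edit) becomes -EFAULT rather than a kernel stack overwrite. A sketch with illustrative byte counts:

    #include <stdio.h>
    #include <string.h>

    /* per-call argument byte counts, standing in for the kernel's nas[] */
    static const unsigned char nas[] = { 0, 24, 24, 16, 48 };

    static int do_socketcall(int call, const void *uargs)
    {
        unsigned long a[6];

        if (call < 1 || call >= (int)(sizeof(nas) / sizeof(nas[0])))
            return -1;                  /* -EINVAL */
        if (nas[call] > sizeof(a))      /* the hardened check */
            return -2;                  /* -EFAULT */
        memcpy(a, uargs, nas[call]);    /* stands in for copy_from_user() */
        return (int)a[0];
    }

    int main(void)
    {
        unsigned long args[6] = { 7, 2, 3, 4, 5, 6 };
        printf("%d\n", do_socketcall(3, args));   /* 7 */
        return 0;
    }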
103273diff --git a/net/core/datagram.c b/net/core/datagram.c
103274index df493d6..1145766 100644
103275--- a/net/core/datagram.c
103276+++ b/net/core/datagram.c
103277@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
103278 }
103279
103280 kfree_skb(skb);
103281- atomic_inc(&sk->sk_drops);
103282+ atomic_inc_unchecked(&sk->sk_drops);
103283 sk_mem_reclaim_partial(sk);
103284
103285 return err;
103286diff --git a/net/core/dev.c b/net/core/dev.c
103287index 22a53ac..1d19af7 100644
103288--- a/net/core/dev.c
103289+++ b/net/core/dev.c
103290@@ -1681,14 +1681,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
103291 {
103292 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
103293 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
103294- atomic_long_inc(&dev->rx_dropped);
103295+ atomic_long_inc_unchecked(&dev->rx_dropped);
103296 kfree_skb(skb);
103297 return NET_RX_DROP;
103298 }
103299 }
103300
103301 if (unlikely(!is_skb_forwardable(dev, skb))) {
103302- atomic_long_inc(&dev->rx_dropped);
103303+ atomic_long_inc_unchecked(&dev->rx_dropped);
103304 kfree_skb(skb);
103305 return NET_RX_DROP;
103306 }
103307@@ -2987,7 +2987,7 @@ recursion_alert:
103308 drop:
103309 rcu_read_unlock_bh();
103310
103311- atomic_long_inc(&dev->tx_dropped);
103312+ atomic_long_inc_unchecked(&dev->tx_dropped);
103313 kfree_skb_list(skb);
103314 return rc;
103315 out:
103316@@ -3336,7 +3336,7 @@ enqueue:
103317
103318 local_irq_restore(flags);
103319
103320- atomic_long_inc(&skb->dev->rx_dropped);
103321+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
103322 kfree_skb(skb);
103323 return NET_RX_DROP;
103324 }
103325@@ -3413,7 +3413,7 @@ int netif_rx_ni(struct sk_buff *skb)
103326 }
103327 EXPORT_SYMBOL(netif_rx_ni);
103328
103329-static void net_tx_action(struct softirq_action *h)
103330+static __latent_entropy void net_tx_action(void)
103331 {
103332 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
103333
103334@@ -3751,7 +3751,7 @@ ncls:
103335 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
103336 } else {
103337 drop:
103338- atomic_long_inc(&skb->dev->rx_dropped);
103339+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
103340 kfree_skb(skb);
103341 /* Jamal, now you will not able to escape explaining
103342 * me how you were going to use this. :-)
103343@@ -4640,7 +4640,7 @@ out_unlock:
103344 return work;
103345 }
103346
103347-static void net_rx_action(struct softirq_action *h)
103348+static __latent_entropy void net_rx_action(void)
103349 {
103350 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
103351 unsigned long time_limit = jiffies + 2;
103352@@ -6676,8 +6676,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
103353 } else {
103354 netdev_stats_to_stats64(storage, &dev->stats);
103355 }
103356- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
103357- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
103358+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
103359+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
103360 return storage;
103361 }
103362 EXPORT_SYMBOL(dev_get_stats);
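net_tx_action()/net_rx_action() gain __latent_entropy, an attribute consumed by PaX's latent_entropy GCC plugin: instrumented functions accumulate a compile-time-randomized value along their control flow and fold it into a global pool each time they run, cheaply feeding early-boot randomness from frequently executed paths; the unused softirq_action argument is dropped to match a prototype change elsewhere in the patch. The attribute has to vanish when the plugin is absent; roughly:

    /* assumed shape; only has effect when the plugin is loaded */
    #if defined(LATENT_ENTROPY_PLUGIN)
    # define __latent_entropy __attribute__((latent_entropy))
    #else
    # define __latent_entropy
    #endif

    static __latent_entropy void net_rx_worker(void)
    {
        /* the plugin adds a local accumulator, updates it along each
         * basic block with compile-time-random constants, and folds
         * it into a global pool when the function returns */
    }

    int main(void)
    {
        net_rx_worker();
        return 0;
    }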
103363diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
103364index b94b1d2..da3ed7c 100644
103365--- a/net/core/dev_ioctl.c
103366+++ b/net/core/dev_ioctl.c
103367@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
103368 no_module = !dev;
103369 if (no_module && capable(CAP_NET_ADMIN))
103370 no_module = request_module("netdev-%s", name);
103371- if (no_module && capable(CAP_SYS_MODULE))
103372+ if (no_module && capable(CAP_SYS_MODULE)) {
103373+#ifdef CONFIG_GRKERNSEC_MODHARDEN
103374+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
103375+#else
103376 request_module("%s", name);
103377+#endif
103378+ }
103379 }
103380 EXPORT_SYMBOL(dev_load);
103381
103382diff --git a/net/core/filter.c b/net/core/filter.c
103383index f6bdc2b..76eba8e 100644
103384--- a/net/core/filter.c
103385+++ b/net/core/filter.c
103386@@ -533,7 +533,11 @@ do_pass:
103387
103388 /* Unknown instruction. */
103389 default:
103390- goto err;
103391+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
103392+ fp->code, fp->jt, fp->jf, fp->k);
103393+ kfree(addrs);
103394+ BUG();
103395+ return -EINVAL;
103396 }
103397
103398 insn++;
103399@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
103400 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
103401 int pc, ret = 0;
103402
103403- BUILD_BUG_ON(BPF_MEMWORDS > 16);
103404+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
103405
103406 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
103407 if (!masks)
103408@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
103409 if (!fp)
103410 return -ENOMEM;
103411
103412- memcpy(fp->insns, fprog->filter, fsize);
103413+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
103414
103415 fp->len = fprog->len;
103416 /* Since unattached filters are not copied back to user
103417diff --git a/net/core/flow.c b/net/core/flow.c
103418index 1033725..340f65d 100644
103419--- a/net/core/flow.c
103420+++ b/net/core/flow.c
103421@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
103422 static int flow_entry_valid(struct flow_cache_entry *fle,
103423 struct netns_xfrm *xfrm)
103424 {
103425- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
103426+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
103427 return 0;
103428 if (fle->object && !fle->object->ops->check(fle->object))
103429 return 0;
103430@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
103431 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
103432 fcp->hash_count++;
103433 }
103434- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
103435+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
103436 flo = fle->object;
103437 if (!flo)
103438 goto ret_object;
103439@@ -263,7 +263,7 @@ nocache:
103440 }
103441 flo = resolver(net, key, family, dir, flo, ctx);
103442 if (fle) {
103443- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
103444+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
103445 if (!IS_ERR(flo))
103446 fle->object = flo;
103447 else
103448diff --git a/net/core/neighbour.c b/net/core/neighbour.c
103449index 70fe9e1..926784c 100644
103450--- a/net/core/neighbour.c
103451+++ b/net/core/neighbour.c
103452@@ -2806,7 +2806,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
103453 void __user *buffer, size_t *lenp, loff_t *ppos)
103454 {
103455 int size, ret;
103456- struct ctl_table tmp = *ctl;
103457+ ctl_table_no_const tmp = *ctl;
103458
103459 tmp.extra1 = &zero;
103460 tmp.extra2 = &unres_qlen_max;
103461@@ -2868,7 +2868,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
103462 void __user *buffer,
103463 size_t *lenp, loff_t *ppos)
103464 {
103465- struct ctl_table tmp = *ctl;
103466+ ctl_table_no_const tmp = *ctl;
103467 int ret;
103468
103469 tmp.extra1 = &zero;
103470diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
103471index 2bf8329..2eb1423 100644
103472--- a/net/core/net-procfs.c
103473+++ b/net/core/net-procfs.c
103474@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
103475 struct rtnl_link_stats64 temp;
103476 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
103477
103478- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
103479+ if (gr_proc_is_restricted())
103480+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
103481+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
103482+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
103483+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
103484+ else
103485+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
103486 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
103487 dev->name, stats->rx_bytes, stats->rx_packets,
103488 stats->rx_errors,
103489@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
103490 return 0;
103491 }
103492
103493-static const struct seq_operations dev_seq_ops = {
103494+const struct seq_operations dev_seq_ops = {
103495 .start = dev_seq_start,
103496 .next = dev_seq_next,
103497 .stop = dev_seq_stop,
103498@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
103499
103500 static int softnet_seq_open(struct inode *inode, struct file *file)
103501 {
103502- return seq_open(file, &softnet_seq_ops);
103503+ return seq_open_restrict(file, &softnet_seq_ops);
103504 }
103505
103506 static const struct file_operations softnet_seq_fops = {
103507@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
103508 else
103509 seq_printf(seq, "%04x", ntohs(pt->type));
103510
103511+#ifdef CONFIG_GRKERNSEC_HIDESYM
103512+ seq_printf(seq, " %-8s %pf\n",
103513+ pt->dev ? pt->dev->name : "", NULL);
103514+#else
103515 seq_printf(seq, " %-8s %pf\n",
103516 pt->dev ? pt->dev->name : "", pt->func);
103517+#endif
103518 }
103519
103520 return 0;
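The HIDESYM hunks here and in the earlier vmalloc s_show() swap %pS/%pf output for %pK (or NULL) so that unprivileged readers of these /proc files cannot harvest kernel addresses and symbol locations for KASLR bypass; mainline's %pK applies the same policy gated on kptr_restrict. A userspace analogue of the policy:

    #include <stdio.h>

    /* print the real pointer only to privileged viewers; this is what
     * %pK does, gated on kptr_restrict and the reader's capabilities */
    static void print_ptr(const void *p, int privileged)
    {
        if (privileged)
            printf("%p\n", p);
        else
            printf("0000000000000000\n");   /* sanitized view */
    }

    int main(void)
    {
        int x;
        print_ptr(&x, 0);
        print_ptr(&x, 1);
        return 0;
    }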
103521diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
103522index f2aa73b..0d1a1ea 100644
103523--- a/net/core/net-sysfs.c
103524+++ b/net/core/net-sysfs.c
103525@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
103526 {
103527 struct net_device *netdev = to_net_dev(dev);
103528 return sprintf(buf, fmt_dec,
103529- atomic_read(&netdev->carrier_changes));
103530+ atomic_read_unchecked(&netdev->carrier_changes));
103531 }
103532 static DEVICE_ATTR_RO(carrier_changes);
103533
103534diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
103535index 70d3450..eb7c528 100644
103536--- a/net/core/net_namespace.c
103537+++ b/net/core/net_namespace.c
103538@@ -663,7 +663,7 @@ static int __register_pernet_operations(struct list_head *list,
103539 int error;
103540 LIST_HEAD(net_exit_list);
103541
103542- list_add_tail(&ops->list, list);
103543+ pax_list_add_tail((struct list_head *)&ops->list, list);
103544 if (ops->init || (ops->id && ops->size)) {
103545 for_each_net(net) {
103546 error = ops_init(ops, net);
103547@@ -676,7 +676,7 @@ static int __register_pernet_operations(struct list_head *list,
103548
103549 out_undo:
103550 /* If I have an error cleanup all namespaces I initialized */
103551- list_del(&ops->list);
103552+ pax_list_del((struct list_head *)&ops->list);
103553 ops_exit_list(ops, &net_exit_list);
103554 ops_free_list(ops, &net_exit_list);
103555 return error;
103556@@ -687,7 +687,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
103557 struct net *net;
103558 LIST_HEAD(net_exit_list);
103559
103560- list_del(&ops->list);
103561+ pax_list_del((struct list_head *)&ops->list);
103562 for_each_net(net)
103563 list_add_tail(&net->exit_list, &net_exit_list);
103564 ops_exit_list(ops, &net_exit_list);
103565@@ -821,7 +821,7 @@ int register_pernet_device(struct pernet_operations *ops)
103566 mutex_lock(&net_mutex);
103567 error = register_pernet_operations(&pernet_list, ops);
103568 if (!error && (first_device == &pernet_list))
103569- first_device = &ops->list;
103570+ first_device = (struct list_head *)&ops->list;
103571 mutex_unlock(&net_mutex);
103572 return error;
103573 }
103574diff --git a/net/core/netpoll.c b/net/core/netpoll.c
103575index c126a87..10ad89d 100644
103576--- a/net/core/netpoll.c
103577+++ b/net/core/netpoll.c
103578@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
103579 struct udphdr *udph;
103580 struct iphdr *iph;
103581 struct ethhdr *eth;
103582- static atomic_t ip_ident;
103583+ static atomic_unchecked_t ip_ident;
103584 struct ipv6hdr *ip6h;
103585
103586 udp_len = len + sizeof(*udph);
103587@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
103588 put_unaligned(0x45, (unsigned char *)iph);
103589 iph->tos = 0;
103590 put_unaligned(htons(ip_len), &(iph->tot_len));
103591- iph->id = htons(atomic_inc_return(&ip_ident));
103592+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
103593 iph->frag_off = 0;
103594 iph->ttl = 64;
103595 iph->protocol = IPPROTO_UDP;
103596diff --git a/net/core/pktgen.c b/net/core/pktgen.c
103597index 508155b..fad080f 100644
103598--- a/net/core/pktgen.c
103599+++ b/net/core/pktgen.c
103600@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
103601 pn->net = net;
103602 INIT_LIST_HEAD(&pn->pktgen_threads);
103603 pn->pktgen_exiting = false;
103604- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
103605+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
103606 if (!pn->proc_dir) {
103607 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
103608 return -ENODEV;
103609diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
103610index 7ebed55..378bf34 100644
103611--- a/net/core/rtnetlink.c
103612+++ b/net/core/rtnetlink.c
103613@@ -61,7 +61,7 @@ struct rtnl_link {
103614 rtnl_doit_func doit;
103615 rtnl_dumpit_func dumpit;
103616 rtnl_calcit_func calcit;
103617-};
103618+} __no_const;
103619
103620 static DEFINE_MUTEX(rtnl_mutex);
103621
103622@@ -307,10 +307,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
103623 * to use the ops for creating device. So do not
103624 * fill up dellink as well. That disables rtnl_dellink.
103625 */
103626- if (ops->setup && !ops->dellink)
103627- ops->dellink = unregister_netdevice_queue;
103628+ if (ops->setup && !ops->dellink) {
103629+ pax_open_kernel();
103630+ *(void **)&ops->dellink = unregister_netdevice_queue;
103631+ pax_close_kernel();
103632+ }
103633
103634- list_add_tail(&ops->list, &link_ops);
103635+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
103636 return 0;
103637 }
103638 EXPORT_SYMBOL_GPL(__rtnl_link_register);
103639@@ -357,7 +360,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
103640 for_each_net(net) {
103641 __rtnl_kill_links(net, ops);
103642 }
103643- list_del(&ops->list);
103644+ pax_list_del((struct list_head *)&ops->list);
103645 }
103646 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
103647
103648@@ -1047,7 +1050,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
103649 (dev->ifalias &&
103650 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
103651 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
103652- atomic_read(&dev->carrier_changes)))
103653+ atomic_read_unchecked(&dev->carrier_changes)))
103654 goto nla_put_failure;
103655
103656 if (1) {
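With rtnl_link_ops constified, the one legitimate late write, defaulting ->dellink in __rtnl_link_register(), has to go through pax_open_kernel()/pax_close_kernel(), which briefly lift write protection around the store. A userspace analogue of the open/write/close shape using mprotect(), purely illustrative:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        /* a table that is read-only in steady state, like a
         * constified ops structure */
        int *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        ops[0] = 1;
        mprotect(ops, pg, PROT_READ);                /* "constify" */

        mprotect(ops, pg, PROT_READ | PROT_WRITE);   /* pax_open_kernel() */
        ops[0] = 2;                                  /* the one late write */
        mprotect(ops, pg, PROT_READ);                /* pax_close_kernel() */

        printf("%d\n", ops[0]);
        return 0;
    }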
103657diff --git a/net/core/scm.c b/net/core/scm.c
103658index 3b6899b..cf36238 100644
103659--- a/net/core/scm.c
103660+++ b/net/core/scm.c
103661@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
103662 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
103663 {
103664 struct cmsghdr __user *cm
103665- = (__force struct cmsghdr __user *)msg->msg_control;
103666+ = (struct cmsghdr __force_user *)msg->msg_control;
103667 struct cmsghdr cmhdr;
103668 int cmlen = CMSG_LEN(len);
103669 int err;
103670@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
103671 err = -EFAULT;
103672 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
103673 goto out;
103674- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
103675+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
103676 goto out;
103677 cmlen = CMSG_SPACE(len);
103678 if (msg->msg_controllen < cmlen)
103679@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
103680 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
103681 {
103682 struct cmsghdr __user *cm
103683- = (__force struct cmsghdr __user*)msg->msg_control;
103684+ = (struct cmsghdr __force_user *)msg->msg_control;
103685
103686 int fdmax = 0;
103687 int fdnum = scm->fp->count;
103688@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
103689 if (fdnum < fdmax)
103690 fdmax = fdnum;
103691
103692- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
103693+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
103694 i++, cmfptr++)
103695 {
103696 struct socket *sock;
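
The scm.c changes replace bare __force casts with __force_user/__force_kernel. With UDEREF/USERCOPY, user and kernel pointers are treated as distinct sparse address spaces, so a deliberate crossing is annotated with the direction it takes. A plausible definition, assumed here since the compiler.h part of the patch is not in this excerpt:

        #ifdef __CHECKER__
        # define __force_user   __force __user
        # define __force_kernel __force __kernel
        #else
        # define __force_user
        # define __force_kernel
        #endif
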
103697diff --git a/net/core/skbuff.c b/net/core/skbuff.c
103698index e9f9a15..6eb024e 100644
103699--- a/net/core/skbuff.c
103700+++ b/net/core/skbuff.c
103701@@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
103702 __wsum skb_checksum(const struct sk_buff *skb, int offset,
103703 int len, __wsum csum)
103704 {
103705- const struct skb_checksum_ops ops = {
103706+ static const struct skb_checksum_ops ops = {
103707 .update = csum_partial_ext,
103708 .combine = csum_block_add_ext,
103709 };
103710@@ -3379,12 +3379,14 @@ void __init skb_init(void)
103711 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
103712 sizeof(struct sk_buff),
103713 0,
103714- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
103715+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
103716+ SLAB_NO_SANITIZE,
103717 NULL);
103718 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
103719 sizeof(struct sk_buff_fclones),
103720 0,
103721- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
103722+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
103723+ SLAB_NO_SANITIZE,
103724 NULL);
103725 }
103726
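
SLAB_NO_SANITIZE pairs with PAX_MEMORY_SANITIZE, which wipes slab objects on free to curb use-after-free data leaks; the skb head caches are hot enough that the patch exempts them from the wipe. Usage sketch for an arbitrary cache (the object type is made up):

        struct example_obj { int a, b; };

        static struct kmem_cache *example_cache;

        static int __init example_cache_init(void)
        {
                example_cache = kmem_cache_create("example_cache",
                                                  sizeof(struct example_obj), 0,
                                                  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
                                                  SLAB_NO_SANITIZE, /* skip free-time wipe */
                                                  NULL);
                return example_cache ? 0 : -ENOMEM;
        }
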
103727diff --git a/net/core/sock.c b/net/core/sock.c
103728index 71e3e5f..ab90920 100644
103729--- a/net/core/sock.c
103730+++ b/net/core/sock.c
103731@@ -443,7 +443,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
103732 struct sk_buff_head *list = &sk->sk_receive_queue;
103733
103734 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
103735- atomic_inc(&sk->sk_drops);
103736+ atomic_inc_unchecked(&sk->sk_drops);
103737 trace_sock_rcvqueue_full(sk, skb);
103738 return -ENOMEM;
103739 }
103740@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
103741 return err;
103742
103743 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
103744- atomic_inc(&sk->sk_drops);
103745+ atomic_inc_unchecked(&sk->sk_drops);
103746 return -ENOBUFS;
103747 }
103748
103749@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
103750 skb_dst_force(skb);
103751
103752 spin_lock_irqsave(&list->lock, flags);
103753- skb->dropcount = atomic_read(&sk->sk_drops);
103754+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
103755 __skb_queue_tail(list, skb);
103756 spin_unlock_irqrestore(&list->lock, flags);
103757
103758@@ -486,7 +486,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
103759 skb->dev = NULL;
103760
103761 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
103762- atomic_inc(&sk->sk_drops);
103763+ atomic_inc_unchecked(&sk->sk_drops);
103764 goto discard_and_relse;
103765 }
103766 if (nested)
103767@@ -504,7 +504,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
103768 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
103769 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
103770 bh_unlock_sock(sk);
103771- atomic_inc(&sk->sk_drops);
103772+ atomic_inc_unchecked(&sk->sk_drops);
103773 goto discard_and_relse;
103774 }
103775
103776@@ -910,6 +910,7 @@ set_rcvbuf:
103777 }
103778 break;
103779
103780+#ifndef CONFIG_GRKERNSEC_BPF_HARDEN
103781 case SO_ATTACH_BPF:
103782 ret = -EINVAL;
103783 if (optlen == sizeof(u32)) {
103784@@ -922,7 +923,7 @@ set_rcvbuf:
103785 ret = sk_attach_bpf(ufd, sk);
103786 }
103787 break;
103788-
103789+#endif
103790 case SO_DETACH_FILTER:
103791 ret = sk_detach_filter(sk);
103792 break;
103793@@ -1026,12 +1027,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
103794 struct timeval tm;
103795 } v;
103796
103797- int lv = sizeof(int);
103798- int len;
103799+ unsigned int lv = sizeof(int);
103800+ unsigned int len;
103801
103802 if (get_user(len, optlen))
103803 return -EFAULT;
103804- if (len < 0)
103805+ if (len > INT_MAX)
103806 return -EINVAL;
103807
103808 memset(&v, 0, sizeof(v));
103809@@ -1169,11 +1170,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
103810
103811 case SO_PEERNAME:
103812 {
103813- char address[128];
103814+ char address[_K_SS_MAXSIZE];
103815
103816 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
103817 return -ENOTCONN;
103818- if (lv < len)
103819+ if (lv < len || sizeof address < len)
103820 return -EINVAL;
103821 if (copy_to_user(optval, address, len))
103822 return -EFAULT;
103823@@ -1258,7 +1259,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
103824
103825 if (len > lv)
103826 len = lv;
103827- if (copy_to_user(optval, &v, len))
103828+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
103829 return -EFAULT;
103830 lenout:
103831 if (put_user(len, optlen))
103832@@ -2375,7 +2376,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
103833 */
103834 smp_wmb();
103835 atomic_set(&sk->sk_refcnt, 1);
103836- atomic_set(&sk->sk_drops, 0);
103837+ atomic_set_unchecked(&sk->sk_drops, 0);
103838 }
103839 EXPORT_SYMBOL(sock_init_data);
103840
103841@@ -2503,6 +2504,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
103842 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
103843 int level, int type)
103844 {
103845+ struct sock_extended_err ee;
103846 struct sock_exterr_skb *serr;
103847 struct sk_buff *skb;
103848 int copied, err;
103849@@ -2524,7 +2526,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
103850 sock_recv_timestamp(msg, sk, skb);
103851
103852 serr = SKB_EXT_ERR(skb);
103853- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
103854+ ee = serr->ee;
103855+ put_cmsg(msg, level, type, sizeof ee, &ee);
103856
103857 msg->msg_flags |= MSG_ERRQUEUE;
103858 err = copied;
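
Three hardening moves in sock.c's getsockopt path: the lengths become unsigned, with len > INT_MAX catching what used to be the negative-length check; SO_PEERNAME's buffer is sized by _K_SS_MAXSIZE rather than a magic 128, and every copy_to_user() is additionally bounded by the size of the kernel object it reads; and sock_recv_errqueue() bounces serr->ee through a stack copy so the copy source is a complete, well-defined object rather than a pointer into skb->cb[]. The bounded-copy idiom, distilled:

        /* Never let a user-influenced length exceed the kernel object. */
        static int copy_bounded(void __user *dst, const void *src,
                                unsigned int len, size_t objsize)
        {
                if (len > objsize || copy_to_user(dst, src, len))
                        return -EFAULT;
                return 0;
        }
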
103859diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
103860index ad704c7..ca48aff 100644
103861--- a/net/core/sock_diag.c
103862+++ b/net/core/sock_diag.c
103863@@ -9,26 +9,33 @@
103864 #include <linux/inet_diag.h>
103865 #include <linux/sock_diag.h>
103866
103867-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
103868+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
103869 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
103870 static DEFINE_MUTEX(sock_diag_table_mutex);
103871
103872 int sock_diag_check_cookie(void *sk, __u32 *cookie)
103873 {
103874+#ifndef CONFIG_GRKERNSEC_HIDESYM
103875 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
103876 cookie[1] != INET_DIAG_NOCOOKIE) &&
103877 ((u32)(unsigned long)sk != cookie[0] ||
103878 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
103879 return -ESTALE;
103880 else
103881+#endif
103882 return 0;
103883 }
103884 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
103885
103886 void sock_diag_save_cookie(void *sk, __u32 *cookie)
103887 {
103888+#ifdef CONFIG_GRKERNSEC_HIDESYM
103889+ cookie[0] = 0;
103890+ cookie[1] = 0;
103891+#else
103892 cookie[0] = (u32)(unsigned long)sk;
103893 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
103894+#endif
103895 }
103896 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
103897
103898@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
103899 mutex_lock(&sock_diag_table_mutex);
103900 if (sock_diag_handlers[hndl->family])
103901 err = -EBUSY;
103902- else
103903+ else {
103904+ pax_open_kernel();
103905 sock_diag_handlers[hndl->family] = hndl;
103906+ pax_close_kernel();
103907+ }
103908 mutex_unlock(&sock_diag_table_mutex);
103909
103910 return err;
103911@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
103912
103913 mutex_lock(&sock_diag_table_mutex);
103914 BUG_ON(sock_diag_handlers[family] != hnld);
103915+ pax_open_kernel();
103916 sock_diag_handlers[family] = NULL;
103917+ pax_close_kernel();
103918 mutex_unlock(&sock_diag_table_mutex);
103919 }
103920 EXPORT_SYMBOL_GPL(sock_diag_unregister);
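
sock_diag cookies are derived from the socket's kernel address, which hands heap-layout information to user space; under CONFIG_GRKERNSEC_HIDESYM the patch zeroes them on save and skips the staleness comparison on check. The handler table itself becomes __read_only, with registration writes wrapped in the pax_open_kernel() pair as before. The cookie side, as it ends up:

        void sock_diag_save_cookie_sketch(void *sk, __u32 *cookie)
        {
        #ifdef CONFIG_GRKERNSEC_HIDESYM
                cookie[0] = 0;  /* never expose a kernel pointer */
                cookie[1] = 0;
        #else
                cookie[0] = (u32)(unsigned long)sk;
                cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
        #endif
        }
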
103921diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
103922index 8ce351f..2c388f7 100644
103923--- a/net/core/sysctl_net_core.c
103924+++ b/net/core/sysctl_net_core.c
103925@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
103926 {
103927 unsigned int orig_size, size;
103928 int ret, i;
103929- struct ctl_table tmp = {
103930+ ctl_table_no_const tmp = {
103931 .data = &size,
103932 .maxlen = sizeof(size),
103933 .mode = table->mode
103934@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
103935 void __user *buffer, size_t *lenp, loff_t *ppos)
103936 {
103937 char id[IFNAMSIZ];
103938- struct ctl_table tbl = {
103939+ ctl_table_no_const tbl = {
103940 .data = id,
103941 .maxlen = IFNAMSIZ,
103942 };
103943@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
103944 static int proc_do_rss_key(struct ctl_table *table, int write,
103945 void __user *buffer, size_t *lenp, loff_t *ppos)
103946 {
103947- struct ctl_table fake_table;
103948+ ctl_table_no_const fake_table;
103949 char buf[NETDEV_RSS_KEY_LEN * 3];
103950
103951 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
103952@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
103953 .mode = 0444,
103954 .proc_handler = proc_do_rss_key,
103955 },
103956-#ifdef CONFIG_BPF_JIT
103957+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
103958 {
103959 .procname = "bpf_jit_enable",
103960 .data = &bpf_jit_enable,
103961@@ -411,13 +411,12 @@ static struct ctl_table netns_core_table[] = {
103962
103963 static __net_init int sysctl_core_net_init(struct net *net)
103964 {
103965- struct ctl_table *tbl;
103966+ ctl_table_no_const *tbl = NULL;
103967
103968 net->core.sysctl_somaxconn = SOMAXCONN;
103969
103970- tbl = netns_core_table;
103971 if (!net_eq(net, &init_net)) {
103972- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
103973+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
103974 if (tbl == NULL)
103975 goto err_dup;
103976
103977@@ -427,17 +426,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
103978 if (net->user_ns != &init_user_ns) {
103979 tbl[0].procname = NULL;
103980 }
103981- }
103982-
103983- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
103984+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
103985+ } else
103986+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
103987 if (net->core.sysctl_hdr == NULL)
103988 goto err_reg;
103989
103990 return 0;
103991
103992 err_reg:
103993- if (tbl != netns_core_table)
103994- kfree(tbl);
103995+ kfree(tbl);
103996 err_dup:
103997 return -ENOMEM;
103998 }
103999@@ -452,7 +450,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
104000 kfree(tbl);
104001 }
104002
104003-static __net_initdata struct pernet_operations sysctl_core_ops = {
104004+static __net_initconst struct pernet_operations sysctl_core_ops = {
104005 .init = sysctl_core_net_init,
104006 .exit = sysctl_core_net_exit,
104007 };
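
sysctl_core_net_init() is the template this patch applies to nearly every per-netns sysctl registration below (6lowpan reassembly, devinet, ip_fragment, route, ipv4, ...): the static table is registered directly for init_net, only other namespaces get a kmemdup'ed copy typed ctl_table_no_const (plain ctl_table being const under the constify plugin), and the error path can kfree(tbl) unconditionally because tbl stays NULL for init_net. Generic shape, with placeholder names:

        static __net_init int example_sysctl_init(struct net *net)
        {
                ctl_table_no_const *tbl = NULL;

                if (!net_eq(net, &init_net)) {
                        tbl = kmemdup(example_table, sizeof(example_table),
                                      GFP_KERNEL);
                        if (tbl == NULL)
                                return -ENOMEM;
                        /* per-namespace fixups need the writable copy */
                        net->example_hdr = register_net_sysctl(net, "net/example", tbl);
                } else
                        net->example_hdr = register_net_sysctl(net, "net/example",
                                                               example_table);
                if (net->example_hdr == NULL) {
                        kfree(tbl);     /* NULL-safe; NULL for init_net */
                        return -ENOMEM;
                }
                return 0;
        }
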
104008diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
104009index 8102286..a0c2755 100644
104010--- a/net/decnet/af_decnet.c
104011+++ b/net/decnet/af_decnet.c
104012@@ -466,6 +466,7 @@ static struct proto dn_proto = {
104013 .sysctl_rmem = sysctl_decnet_rmem,
104014 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
104015 .obj_size = sizeof(struct dn_sock),
104016+ .slab_flags = SLAB_USERCOPY,
104017 };
104018
104019 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
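
The DECnet proto gains SLAB_USERCOPY because PAX_USERCOPY only allows copies between user space and slab objects whose cache is explicitly whitelisted; protocols whose socket objects feed copy_to_user()/copy_from_user() need the flag. In proto terms (names invented for the sketch):

        struct example_sock { struct sock sk; };

        /* Whitelist this proto's socket slab for user copies. */
        static struct proto example_proto = {
                .name       = "EXAMPLE",
                .obj_size   = sizeof(struct example_sock),
                .slab_flags = SLAB_USERCOPY,
        };
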
104020diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
104021index b2c26b0..41f803e 100644
104022--- a/net/decnet/dn_dev.c
104023+++ b/net/decnet/dn_dev.c
104024@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
104025 .extra1 = &min_t3,
104026 .extra2 = &max_t3
104027 },
104028- {0}
104029+ { }
104030 },
104031 };
104032
104033diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
104034index 5325b54..a0d4d69 100644
104035--- a/net/decnet/sysctl_net_decnet.c
104036+++ b/net/decnet/sysctl_net_decnet.c
104037@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
104038
104039 if (len > *lenp) len = *lenp;
104040
104041- if (copy_to_user(buffer, addr, len))
104042+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
104043 return -EFAULT;
104044
104045 *lenp = len;
104046@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
104047
104048 if (len > *lenp) len = *lenp;
104049
104050- if (copy_to_user(buffer, devname, len))
104051+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
104052 return -EFAULT;
104053
104054 *lenp = len;
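
Note also the bounds added to the two DECnet sysctl handlers above: the same "length may not exceed the source object" guard from net/core/sock.c, applied to stack buffers. From the hsr driver onward, many rtnl_link_ops definitions additionally lose __read_mostly: under the constify plugin these ops objects are meant to end up const in genuinely read-only memory (with pax_open_kernel() for the rare late write, as in __rtnl_link_register), and an explicit writable-section placement would defeat that.

        /* Sketch: no section attribute, so the object is eligible for
         * the read-only placement done by the constify machinery. */
        static struct rtnl_link_ops example_link_ops = {
                .kind = "example",
        };
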
104055diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
104056index a2c7e4c..3dc9f67 100644
104057--- a/net/hsr/hsr_netlink.c
104058+++ b/net/hsr/hsr_netlink.c
104059@@ -102,7 +102,7 @@ nla_put_failure:
104060 return -EMSGSIZE;
104061 }
104062
104063-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
104064+static struct rtnl_link_ops hsr_link_ops = {
104065 .kind = "hsr",
104066 .maxtype = IFLA_HSR_MAX,
104067 .policy = hsr_policy,
104068diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
104069index 055fbb7..c0dbe60 100644
104070--- a/net/ieee802154/6lowpan/core.c
104071+++ b/net/ieee802154/6lowpan/core.c
104072@@ -217,7 +217,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
104073 dev_put(real_dev);
104074 }
104075
104076-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
104077+static struct rtnl_link_ops lowpan_link_ops = {
104078 .kind = "lowpan",
104079 .priv_size = sizeof(struct lowpan_dev_info),
104080 .setup = lowpan_setup,
104081diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
104082index f46e4d1..30231f1 100644
104083--- a/net/ieee802154/6lowpan/reassembly.c
104084+++ b/net/ieee802154/6lowpan/reassembly.c
104085@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
104086
104087 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
104088 {
104089- struct ctl_table *table;
104090+ ctl_table_no_const *table = NULL;
104091 struct ctl_table_header *hdr;
104092 struct netns_ieee802154_lowpan *ieee802154_lowpan =
104093 net_ieee802154_lowpan(net);
104094
104095- table = lowpan_frags_ns_ctl_table;
104096 if (!net_eq(net, &init_net)) {
104097- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
104098+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
104099 GFP_KERNEL);
104100 if (table == NULL)
104101 goto err_alloc;
104102@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
104103 /* Don't export sysctls to unprivileged users */
104104 if (net->user_ns != &init_user_ns)
104105 table[0].procname = NULL;
104106- }
104107-
104108- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
104109+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
104110+ } else
104111+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
104112 if (hdr == NULL)
104113 goto err_reg;
104114
104115@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
104116 return 0;
104117
104118 err_reg:
104119- if (!net_eq(net, &init_net))
104120- kfree(table);
104121+ kfree(table);
104122 err_alloc:
104123 return -ENOMEM;
104124 }
104125diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
104126index d2e49ba..f78e8aa 100644
104127--- a/net/ipv4/af_inet.c
104128+++ b/net/ipv4/af_inet.c
104129@@ -1390,7 +1390,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
104130 return ip_recv_error(sk, msg, len, addr_len);
104131 #if IS_ENABLED(CONFIG_IPV6)
104132 if (sk->sk_family == AF_INET6)
104133- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
104134+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
104135 #endif
104136 return -EINVAL;
104137 }
104138diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
104139index 3a8985c..9d2a870 100644
104140--- a/net/ipv4/devinet.c
104141+++ b/net/ipv4/devinet.c
104142@@ -69,7 +69,8 @@
104143
104144 static struct ipv4_devconf ipv4_devconf = {
104145 .data = {
104146- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
104147+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
104148+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
104149 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
104150 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
104151 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
104152@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
104153
104154 static struct ipv4_devconf ipv4_devconf_dflt = {
104155 .data = {
104156- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
104157+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
104158+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
104159 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
104160 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
104161 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
104162@@ -1549,7 +1551,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
104163 idx = 0;
104164 head = &net->dev_index_head[h];
104165 rcu_read_lock();
104166- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
104167+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
104168 net->dev_base_seq;
104169 hlist_for_each_entry_rcu(dev, head, index_hlist) {
104170 if (idx < s_idx)
104171@@ -1868,7 +1870,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
104172 idx = 0;
104173 head = &net->dev_index_head[h];
104174 rcu_read_lock();
104175- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
104176+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
104177 net->dev_base_seq;
104178 hlist_for_each_entry_rcu(dev, head, index_hlist) {
104179 if (idx < s_idx)
104180@@ -2103,7 +2105,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
104181 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
104182 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
104183
104184-static struct devinet_sysctl_table {
104185+static const struct devinet_sysctl_table {
104186 struct ctl_table_header *sysctl_header;
104187 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
104188 } devinet_sysctl = {
104189@@ -2235,7 +2237,7 @@ static __net_init int devinet_init_net(struct net *net)
104190 int err;
104191 struct ipv4_devconf *all, *dflt;
104192 #ifdef CONFIG_SYSCTL
104193- struct ctl_table *tbl = ctl_forward_entry;
104194+ ctl_table_no_const *tbl = NULL;
104195 struct ctl_table_header *forw_hdr;
104196 #endif
104197
104198@@ -2253,7 +2255,7 @@ static __net_init int devinet_init_net(struct net *net)
104199 goto err_alloc_dflt;
104200
104201 #ifdef CONFIG_SYSCTL
104202- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
104203+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
104204 if (tbl == NULL)
104205 goto err_alloc_ctl;
104206
104207@@ -2273,7 +2275,10 @@ static __net_init int devinet_init_net(struct net *net)
104208 goto err_reg_dflt;
104209
104210 err = -ENOMEM;
104211- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
104212+ if (!net_eq(net, &init_net))
104213+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
104214+ else
104215+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
104216 if (forw_hdr == NULL)
104217 goto err_reg_ctl;
104218 net->ipv4.forw_hdr = forw_hdr;
104219@@ -2289,8 +2294,7 @@ err_reg_ctl:
104220 err_reg_dflt:
104221 __devinet_sysctl_unregister(all);
104222 err_reg_all:
104223- if (tbl != ctl_forward_entry)
104224- kfree(tbl);
104225+ kfree(tbl);
104226 err_alloc_ctl:
104227 #endif
104228 if (dflt != &ipv4_devconf_dflt)
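
Besides the sysctl-table mechanics, devinet.c changes the compiled-in per-interface defaults: ICMP redirects are no longer accepted and strict reverse-path filtering is enabled, matching common hardening guidance. As the designated initializers used above:

        /* Hardened defaults (indices as in linux/ip.h, minus one). */
        static struct ipv4_devconf hardened_defaults_sketch = {
                .data = {
                        [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0, /* ignore redirects */
                        [IPV4_DEVCONF_RP_FILTER - 1]        = 1, /* strict rp_filter */
                },
        };
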
104229diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
104230index 23b9b3e..60cf0c4 100644
104231--- a/net/ipv4/fib_frontend.c
104232+++ b/net/ipv4/fib_frontend.c
104233@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
104234 #ifdef CONFIG_IP_ROUTE_MULTIPATH
104235 fib_sync_up(dev);
104236 #endif
104237- atomic_inc(&net->ipv4.dev_addr_genid);
104238+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
104239 rt_cache_flush(dev_net(dev));
104240 break;
104241 case NETDEV_DOWN:
104242 fib_del_ifaddr(ifa, NULL);
104243- atomic_inc(&net->ipv4.dev_addr_genid);
104244+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
104245 if (ifa->ifa_dev->ifa_list == NULL) {
104246 /* Last address was deleted from this interface.
104247 * Disable IP.
104248@@ -1063,7 +1063,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
104249 #ifdef CONFIG_IP_ROUTE_MULTIPATH
104250 fib_sync_up(dev);
104251 #endif
104252- atomic_inc(&net->ipv4.dev_addr_genid);
104253+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
104254 rt_cache_flush(net);
104255 break;
104256 case NETDEV_DOWN:
104257diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
104258index 1e2090e..351a724 100644
104259--- a/net/ipv4/fib_semantics.c
104260+++ b/net/ipv4/fib_semantics.c
104261@@ -753,7 +753,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
104262 nh->nh_saddr = inet_select_addr(nh->nh_dev,
104263 nh->nh_gw,
104264 nh->nh_parent->fib_scope);
104265- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
104266+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
104267
104268 return nh->nh_saddr;
104269 }
104270diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
104271index ff069f6..335e752 100644
104272--- a/net/ipv4/fou.c
104273+++ b/net/ipv4/fou.c
104274@@ -771,12 +771,12 @@ EXPORT_SYMBOL(gue_build_header);
104275
104276 #ifdef CONFIG_NET_FOU_IP_TUNNELS
104277
104278-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
104279+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
104280 .encap_hlen = fou_encap_hlen,
104281 .build_header = fou_build_header,
104282 };
104283
104284-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
104285+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
104286 .encap_hlen = gue_encap_hlen,
104287 .build_header = gue_build_header,
104288 };
104289diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
104290index 9111a4e..3576905 100644
104291--- a/net/ipv4/inet_hashtables.c
104292+++ b/net/ipv4/inet_hashtables.c
104293@@ -18,6 +18,7 @@
104294 #include <linux/sched.h>
104295 #include <linux/slab.h>
104296 #include <linux/wait.h>
104297+#include <linux/security.h>
104298
104299 #include <net/inet_connection_sock.h>
104300 #include <net/inet_hashtables.h>
104301@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
104302 return inet_ehashfn(net, laddr, lport, faddr, fport);
104303 }
104304
104305+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
104306+
104307 /*
104308 * Allocate and initialize a new local port bind bucket.
104309 * The bindhash mutex for snum's hash chain must be held here.
104310@@ -554,6 +557,8 @@ ok:
104311 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
104312 spin_unlock(&head->lock);
104313
104314+ gr_update_task_in_ip_table(inet_sk(sk));
104315+
104316 if (tw) {
104317 inet_twsk_deschedule(tw, death_row);
104318 while (twrefcnt) {
104319diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
104320index 241afd7..31b95d5 100644
104321--- a/net/ipv4/inetpeer.c
104322+++ b/net/ipv4/inetpeer.c
104323@@ -461,7 +461,7 @@ relookup:
104324 if (p) {
104325 p->daddr = *daddr;
104326 atomic_set(&p->refcnt, 1);
104327- atomic_set(&p->rid, 0);
104328+ atomic_set_unchecked(&p->rid, 0);
104329 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
104330 p->rate_tokens = 0;
104331 /* 60*HZ is arbitrary, but chosen enough high so that the first
104332diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
104333index 145a50c..5dd8cc5 100644
104334--- a/net/ipv4/ip_fragment.c
104335+++ b/net/ipv4/ip_fragment.c
104336@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
104337 return 0;
104338
104339 start = qp->rid;
104340- end = atomic_inc_return(&peer->rid);
104341+ end = atomic_inc_return_unchecked(&peer->rid);
104342 qp->rid = end;
104343
104344 rc = qp->q.fragments && (end - start) > max;
104345@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
104346
104347 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
104348 {
104349- struct ctl_table *table;
104350+ ctl_table_no_const *table = NULL;
104351 struct ctl_table_header *hdr;
104352
104353- table = ip4_frags_ns_ctl_table;
104354 if (!net_eq(net, &init_net)) {
104355- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
104356+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
104357 if (table == NULL)
104358 goto err_alloc;
104359
104360@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
104361 /* Don't export sysctls to unprivileged users */
104362 if (net->user_ns != &init_user_ns)
104363 table[0].procname = NULL;
104364- }
104365+ hdr = register_net_sysctl(net, "net/ipv4", table);
104366+ } else
104367+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
104368
104369- hdr = register_net_sysctl(net, "net/ipv4", table);
104370 if (hdr == NULL)
104371 goto err_reg;
104372
104373@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
104374 return 0;
104375
104376 err_reg:
104377- if (!net_eq(net, &init_net))
104378- kfree(table);
104379+ kfree(table);
104380 err_alloc:
104381 return -ENOMEM;
104382 }
104383diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
104384index 6207275f..00323a2 100644
104385--- a/net/ipv4/ip_gre.c
104386+++ b/net/ipv4/ip_gre.c
104387@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
104388 module_param(log_ecn_error, bool, 0644);
104389 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
104390
104391-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
104392+static struct rtnl_link_ops ipgre_link_ops;
104393 static int ipgre_tunnel_init(struct net_device *dev);
104394
104395 static int ipgre_net_id __read_mostly;
104396@@ -817,7 +817,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
104397 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
104398 };
104399
104400-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
104401+static struct rtnl_link_ops ipgre_link_ops = {
104402 .kind = "gre",
104403 .maxtype = IFLA_GRE_MAX,
104404 .policy = ipgre_policy,
104405@@ -832,7 +832,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
104406 .get_link_net = ip_tunnel_get_link_net,
104407 };
104408
104409-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
104410+static struct rtnl_link_ops ipgre_tap_ops = {
104411 .kind = "gretap",
104412 .maxtype = IFLA_GRE_MAX,
104413 .policy = ipgre_policy,
104414diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
104415index 3d4da2c..40f9c29 100644
104416--- a/net/ipv4/ip_input.c
104417+++ b/net/ipv4/ip_input.c
104418@@ -147,6 +147,10 @@
104419 #include <linux/mroute.h>
104420 #include <linux/netlink.h>
104421
104422+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104423+extern int grsec_enable_blackhole;
104424+#endif
104425+
104426 /*
104427 * Process Router Attention IP option (RFC 2113)
104428 */
104429@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
104430 if (!raw) {
104431 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
104432 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
104433+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104434+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104435+#endif
104436 icmp_send(skb, ICMP_DEST_UNREACH,
104437 ICMP_PROT_UNREACH, 0);
104438 }
104439diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
104440index 5cd9927..8610b9f 100644
104441--- a/net/ipv4/ip_sockglue.c
104442+++ b/net/ipv4/ip_sockglue.c
104443@@ -1254,7 +1254,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
104444 len = min_t(unsigned int, len, opt->optlen);
104445 if (put_user(len, optlen))
104446 return -EFAULT;
104447- if (copy_to_user(optval, opt->__data, len))
104448+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
104449+ copy_to_user(optval, opt->__data, len))
104450 return -EFAULT;
104451 return 0;
104452 }
104453@@ -1388,7 +1389,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
104454 if (sk->sk_type != SOCK_STREAM)
104455 return -ENOPROTOOPT;
104456
104457- msg.msg_control = (__force void *) optval;
104458+ msg.msg_control = (__force_kernel void *) optval;
104459 msg.msg_controllen = len;
104460 msg.msg_flags = flags;
104461
104462diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
104463index 94efe14..1453fcc 100644
104464--- a/net/ipv4/ip_vti.c
104465+++ b/net/ipv4/ip_vti.c
104466@@ -45,7 +45,7 @@
104467 #include <net/net_namespace.h>
104468 #include <net/netns/generic.h>
104469
104470-static struct rtnl_link_ops vti_link_ops __read_mostly;
104471+static struct rtnl_link_ops vti_link_ops;
104472
104473 static int vti_net_id __read_mostly;
104474 static int vti_tunnel_init(struct net_device *dev);
104475@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
104476 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
104477 };
104478
104479-static struct rtnl_link_ops vti_link_ops __read_mostly = {
104480+static struct rtnl_link_ops vti_link_ops = {
104481 .kind = "vti",
104482 .maxtype = IFLA_VTI_MAX,
104483 .policy = vti_policy,
104484diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
104485index b26376e..fc3d733 100644
104486--- a/net/ipv4/ipconfig.c
104487+++ b/net/ipv4/ipconfig.c
104488@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
104489
104490 mm_segment_t oldfs = get_fs();
104491 set_fs(get_ds());
104492- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
104493+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
104494 set_fs(oldfs);
104495 return res;
104496 }
104497@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
104498
104499 mm_segment_t oldfs = get_fs();
104500 set_fs(get_ds());
104501- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
104502+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
104503 set_fs(oldfs);
104504 return res;
104505 }
104506@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
104507
104508 mm_segment_t oldfs = get_fs();
104509 set_fs(get_ds());
104510- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
104511+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
104512 set_fs(oldfs);
104513 return res;
104514 }
104515diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
104516index 915d215..48d1db7 100644
104517--- a/net/ipv4/ipip.c
104518+++ b/net/ipv4/ipip.c
104519@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
104520 static int ipip_net_id __read_mostly;
104521
104522 static int ipip_tunnel_init(struct net_device *dev);
104523-static struct rtnl_link_ops ipip_link_ops __read_mostly;
104524+static struct rtnl_link_ops ipip_link_ops;
104525
104526 static int ipip_err(struct sk_buff *skb, u32 info)
104527 {
104528@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
104529 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
104530 };
104531
104532-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
104533+static struct rtnl_link_ops ipip_link_ops = {
104534 .kind = "ipip",
104535 .maxtype = IFLA_IPTUN_MAX,
104536 .policy = ipip_policy,
104537diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
104538index f95b6f9..2ee2097 100644
104539--- a/net/ipv4/netfilter/arp_tables.c
104540+++ b/net/ipv4/netfilter/arp_tables.c
104541@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
104542 #endif
104543
104544 static int get_info(struct net *net, void __user *user,
104545- const int *len, int compat)
104546+ int len, int compat)
104547 {
104548 char name[XT_TABLE_MAXNAMELEN];
104549 struct xt_table *t;
104550 int ret;
104551
104552- if (*len != sizeof(struct arpt_getinfo)) {
104553- duprintf("length %u != %Zu\n", *len,
104554+ if (len != sizeof(struct arpt_getinfo)) {
104555+ duprintf("length %u != %Zu\n", len,
104556 sizeof(struct arpt_getinfo));
104557 return -EINVAL;
104558 }
104559@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
104560 info.size = private->size;
104561 strcpy(info.name, name);
104562
104563- if (copy_to_user(user, &info, *len) != 0)
104564+ if (copy_to_user(user, &info, len) != 0)
104565 ret = -EFAULT;
104566 else
104567 ret = 0;
104568@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
104569
104570 switch (cmd) {
104571 case ARPT_SO_GET_INFO:
104572- ret = get_info(sock_net(sk), user, len, 1);
104573+ ret = get_info(sock_net(sk), user, *len, 1);
104574 break;
104575 case ARPT_SO_GET_ENTRIES:
104576 ret = compat_get_entries(sock_net(sk), user, len);
104577@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
104578
104579 switch (cmd) {
104580 case ARPT_SO_GET_INFO:
104581- ret = get_info(sock_net(sk), user, len, 0);
104582+ ret = get_info(sock_net(sk), user, *len, 0);
104583 break;
104584
104585 case ARPT_SO_GET_ENTRIES:
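
get_info() in arp_tables (and, identically, in ip_tables below) now takes its length by value: the ioctl handlers dereference *len exactly once and pass the result down, so the size validation and the final copy_to_user(user, &info, len) are guaranteed to operate on the same number. Condensed sketch of the resulting flow:

        static int get_info_sketch(struct net *net, void __user *user, int len)
        {
                struct arpt_getinfo info;

                if (len != sizeof(struct arpt_getinfo))
                        return -EINVAL;
                memset(&info, 0, sizeof(info));
                /* ... look up the table and fill info ... */
                return copy_to_user(user, &info, len) ? -EFAULT : 0;
        }
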
104586diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
104587index cf5e82f..75a20f5 100644
104588--- a/net/ipv4/netfilter/ip_tables.c
104589+++ b/net/ipv4/netfilter/ip_tables.c
104590@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
104591 #endif
104592
104593 static int get_info(struct net *net, void __user *user,
104594- const int *len, int compat)
104595+ int len, int compat)
104596 {
104597 char name[XT_TABLE_MAXNAMELEN];
104598 struct xt_table *t;
104599 int ret;
104600
104601- if (*len != sizeof(struct ipt_getinfo)) {
104602- duprintf("length %u != %zu\n", *len,
104603+ if (len != sizeof(struct ipt_getinfo)) {
104604+ duprintf("length %u != %zu\n", len,
104605 sizeof(struct ipt_getinfo));
104606 return -EINVAL;
104607 }
104608@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
104609 info.size = private->size;
104610 strcpy(info.name, name);
104611
104612- if (copy_to_user(user, &info, *len) != 0)
104613+ if (copy_to_user(user, &info, len) != 0)
104614 ret = -EFAULT;
104615 else
104616 ret = 0;
104617@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104618
104619 switch (cmd) {
104620 case IPT_SO_GET_INFO:
104621- ret = get_info(sock_net(sk), user, len, 1);
104622+ ret = get_info(sock_net(sk), user, *len, 1);
104623 break;
104624 case IPT_SO_GET_ENTRIES:
104625 ret = compat_get_entries(sock_net(sk), user, len);
104626@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104627
104628 switch (cmd) {
104629 case IPT_SO_GET_INFO:
104630- ret = get_info(sock_net(sk), user, len, 0);
104631+ ret = get_info(sock_net(sk), user, *len, 0);
104632 break;
104633
104634 case IPT_SO_GET_ENTRIES:
104635diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
104636index e90f83a..3e6acca 100644
104637--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
104638+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
104639@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
104640 spin_lock_init(&cn->lock);
104641
104642 #ifdef CONFIG_PROC_FS
104643- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
104644+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
104645 if (!cn->procdir) {
104646 pr_err("Unable to proc dir entry\n");
104647 return -ENOMEM;
104648diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
104649index 787b0d6..ab6c0ba 100644
104650--- a/net/ipv4/ping.c
104651+++ b/net/ipv4/ping.c
104652@@ -59,7 +59,7 @@ struct ping_table {
104653 };
104654
104655 static struct ping_table ping_table;
104656-struct pingv6_ops pingv6_ops;
104657+struct pingv6_ops *pingv6_ops;
104658 EXPORT_SYMBOL_GPL(pingv6_ops);
104659
104660 static u16 ping_port_rover;
104661@@ -359,7 +359,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
104662 return -ENODEV;
104663 }
104664 }
104665- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
104666+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
104667 scoped);
104668 rcu_read_unlock();
104669
104670@@ -567,7 +567,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
104671 }
104672 #if IS_ENABLED(CONFIG_IPV6)
104673 } else if (skb->protocol == htons(ETH_P_IPV6)) {
104674- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
104675+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
104676 #endif
104677 }
104678
104679@@ -585,7 +585,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
104680 info, (u8 *)icmph);
104681 #if IS_ENABLED(CONFIG_IPV6)
104682 } else if (family == AF_INET6) {
104683- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
104684+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
104685 info, (u8 *)icmph);
104686 #endif
104687 }
104688@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
104689 }
104690
104691 if (inet6_sk(sk)->rxopt.all)
104692- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
104693+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
104694 if (skb->protocol == htons(ETH_P_IPV6) &&
104695 inet6_sk(sk)->rxopt.all)
104696- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
104697+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
104698 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
104699 ip_cmsg_recv(msg, skb);
104700 #endif
104701@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
104702 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
104703 0, sock_i_ino(sp),
104704 atomic_read(&sp->sk_refcnt), sp,
104705- atomic_read(&sp->sk_drops));
104706+ atomic_read_unchecked(&sp->sk_drops));
104707 }
104708
104709 static int ping_v4_seq_show(struct seq_file *seq, void *v)
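
Turning the global struct pingv6_ops into a pointer means the IPv6 side publishes one complete ops object with a single store instead of filling function-pointer fields in a writable global one by one; every caller becomes pingv6_ops->fn(...). Registration would then look roughly like this (the register function name is assumed; the callbacks are the real kernel ones):

        static struct pingv6_ops real_pingv6_ops = {
                .ipv6_recv_error    = ipv6_recv_error,
                .icmpv6_err_convert = icmpv6_err_convert,
                /* remaining callbacks elided */
        };

        void pingv6_ops_register(void)
        {
                pingv6_ops = &real_pingv6_ops;  /* one pointer store */
        }
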
104710diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
104711index f027a70..2e64edc 100644
104712--- a/net/ipv4/raw.c
104713+++ b/net/ipv4/raw.c
104714@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
104715 int raw_rcv(struct sock *sk, struct sk_buff *skb)
104716 {
104717 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
104718- atomic_inc(&sk->sk_drops);
104719+ atomic_inc_unchecked(&sk->sk_drops);
104720 kfree_skb(skb);
104721 return NET_RX_DROP;
104722 }
104723@@ -773,16 +773,20 @@ static int raw_init(struct sock *sk)
104724
104725 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
104726 {
104727+ struct icmp_filter filter;
104728+
104729 if (optlen > sizeof(struct icmp_filter))
104730 optlen = sizeof(struct icmp_filter);
104731- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
104732+ if (copy_from_user(&filter, optval, optlen))
104733 return -EFAULT;
104734+ raw_sk(sk)->filter = filter;
104735 return 0;
104736 }
104737
104738 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
104739 {
104740 int len, ret = -EFAULT;
104741+ struct icmp_filter filter;
104742
104743 if (get_user(len, optlen))
104744 goto out;
104745@@ -792,8 +796,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
104746 if (len > sizeof(struct icmp_filter))
104747 len = sizeof(struct icmp_filter);
104748 ret = -EFAULT;
104749- if (put_user(len, optlen) ||
104750- copy_to_user(optval, &raw_sk(sk)->filter, len))
104751+ filter = raw_sk(sk)->filter;
104752+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
104753 goto out;
104754 ret = 0;
104755 out: return ret;
104756@@ -1022,7 +1026,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
104757 0, 0L, 0,
104758 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
104759 0, sock_i_ino(sp),
104760- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
104761+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
104762 }
104763
104764 static int raw_seq_show(struct seq_file *seq, void *v)
104765diff --git a/net/ipv4/route.c b/net/ipv4/route.c
104766index 20fc020..3ba426f 100644
104767--- a/net/ipv4/route.c
104768+++ b/net/ipv4/route.c
104769@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
104770
104771 static int rt_cache_seq_open(struct inode *inode, struct file *file)
104772 {
104773- return seq_open(file, &rt_cache_seq_ops);
104774+ return seq_open_restrict(file, &rt_cache_seq_ops);
104775 }
104776
104777 static const struct file_operations rt_cache_seq_fops = {
104778@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
104779
104780 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
104781 {
104782- return seq_open(file, &rt_cpu_seq_ops);
104783+ return seq_open_restrict(file, &rt_cpu_seq_ops);
104784 }
104785
104786 static const struct file_operations rt_cpu_seq_fops = {
104787@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
104788
104789 static int rt_acct_proc_open(struct inode *inode, struct file *file)
104790 {
104791- return single_open(file, rt_acct_proc_show, NULL);
104792+ return single_open_restrict(file, rt_acct_proc_show, NULL);
104793 }
104794
104795 static const struct file_operations rt_acct_proc_fops = {
104796@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
104797
104798 #define IP_IDENTS_SZ 2048u
104799 struct ip_ident_bucket {
104800- atomic_t id;
104801+ atomic_unchecked_t id;
104802 u32 stamp32;
104803 };
104804
104805-static struct ip_ident_bucket *ip_idents __read_mostly;
104806+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
104807
104808 /* In order to protect privacy, we add a perturbation to identifiers
104809 * if one generator is seldom used. This makes hard for an attacker
104810@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
104811 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
104812 delta = prandom_u32_max(now - old);
104813
104814- return atomic_add_return(segs + delta, &bucket->id) - segs;
104815+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
104816 }
104817 EXPORT_SYMBOL(ip_idents_reserve);
104818
104819@@ -2639,34 +2639,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
104820 .maxlen = sizeof(int),
104821 .mode = 0200,
104822 .proc_handler = ipv4_sysctl_rtcache_flush,
104823+ .extra1 = &init_net,
104824 },
104825 { },
104826 };
104827
104828 static __net_init int sysctl_route_net_init(struct net *net)
104829 {
104830- struct ctl_table *tbl;
104831+ ctl_table_no_const *tbl = NULL;
104832
104833- tbl = ipv4_route_flush_table;
104834 if (!net_eq(net, &init_net)) {
104835- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
104836+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
104837 if (tbl == NULL)
104838 goto err_dup;
104839
104840 /* Don't export sysctls to unprivileged users */
104841 if (net->user_ns != &init_user_ns)
104842 tbl[0].procname = NULL;
104843- }
104844- tbl[0].extra1 = net;
104845+ tbl[0].extra1 = net;
104846+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
104847+ } else
104848+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
104849
104850- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
104851 if (net->ipv4.route_hdr == NULL)
104852 goto err_reg;
104853 return 0;
104854
104855 err_reg:
104856- if (tbl != ipv4_route_flush_table)
104857- kfree(tbl);
104858+ kfree(tbl);
104859 err_dup:
104860 return -ENOMEM;
104861 }
104862@@ -2689,8 +2689,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
104863
104864 static __net_init int rt_genid_init(struct net *net)
104865 {
104866- atomic_set(&net->ipv4.rt_genid, 0);
104867- atomic_set(&net->fnhe_genid, 0);
104868+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
104869+ atomic_set_unchecked(&net->fnhe_genid, 0);
104870 get_random_bytes(&net->ipv4.dev_addr_genid,
104871 sizeof(net->ipv4.dev_addr_genid));
104872 return 0;
104873@@ -2734,11 +2734,7 @@ int __init ip_rt_init(void)
104874 int rc = 0;
104875 int cpu;
104876
104877- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
104878- if (!ip_idents)
104879- panic("IP: failed to allocate ip_idents\n");
104880-
104881- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
104882+ prandom_bytes(ip_idents, sizeof(ip_idents));
104883
104884 for_each_possible_cpu(cpu) {
104885 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
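
ip_idents also moves from a boot-time kmalloc to a static array: the size is a compile-time constant, so the allocation and its panic-on-failure path are unnecessary, sizeof(ip_idents) replaces the hand-multiplied size, and the buckets use atomic_unchecked_t since ID wraparound is intended. Initialization shrinks to seeding:

        static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;

        static void ip_idents_init(void)       /* sketch */
        {
                prandom_bytes(ip_idents, sizeof(ip_idents));
        }
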
104886diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
104887index d151539..5f5e247 100644
104888--- a/net/ipv4/sysctl_net_ipv4.c
104889+++ b/net/ipv4/sysctl_net_ipv4.c
104890@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
104891 container_of(table->data, struct net, ipv4.ip_local_ports.range);
104892 int ret;
104893 int range[2];
104894- struct ctl_table tmp = {
104895+ ctl_table_no_const tmp = {
104896 .data = &range,
104897 .maxlen = sizeof(range),
104898 .mode = table->mode,
104899@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
104900 int ret;
104901 gid_t urange[2];
104902 kgid_t low, high;
104903- struct ctl_table tmp = {
104904+ ctl_table_no_const tmp = {
104905 .data = &urange,
104906 .maxlen = sizeof(urange),
104907 .mode = table->mode,
104908@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
104909 void __user *buffer, size_t *lenp, loff_t *ppos)
104910 {
104911 char val[TCP_CA_NAME_MAX];
104912- struct ctl_table tbl = {
104913+ ctl_table_no_const tbl = {
104914 .data = val,
104915 .maxlen = TCP_CA_NAME_MAX,
104916 };
104917@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
104918 void __user *buffer, size_t *lenp,
104919 loff_t *ppos)
104920 {
104921- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
104922+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
104923 int ret;
104924
104925 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
104926@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
104927 void __user *buffer, size_t *lenp,
104928 loff_t *ppos)
104929 {
104930- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
104931+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
104932 int ret;
104933
104934 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
104935@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
104936 void __user *buffer, size_t *lenp,
104937 loff_t *ppos)
104938 {
104939- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
104940+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
104941 struct tcp_fastopen_context *ctxt;
104942 int ret;
104943 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
104944@@ -888,13 +888,12 @@ static struct ctl_table ipv4_net_table[] = {
104945
104946 static __net_init int ipv4_sysctl_init_net(struct net *net)
104947 {
104948- struct ctl_table *table;
104949+ ctl_table_no_const *table = NULL;
104950
104951- table = ipv4_net_table;
104952 if (!net_eq(net, &init_net)) {
104953 int i;
104954
104955- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
104956+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
104957 if (table == NULL)
104958 goto err_alloc;
104959
104960@@ -903,7 +902,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
104961 table[i].data += (void *)net - (void *)&init_net;
104962 }
104963
104964- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
104965+ if (!net_eq(net, &init_net))
104966+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
104967+ else
104968+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
104969 if (net->ipv4.ipv4_hdr == NULL)
104970 goto err_reg;
104971
104972diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
104973index 995a225..e1e9183 100644
104974--- a/net/ipv4/tcp.c
104975+++ b/net/ipv4/tcp.c
104976@@ -520,8 +520,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
104977
104978 /* Race breaker. If space is freed after
104979 * wspace test but before the flags are set,
104980- * IO signal will be lost.
104981+ * IO signal will be lost. Memory barrier
104982+ * pairs with the input side.
104983 */
104984+ smp_mb__after_atomic();
104985 if (sk_stream_is_writeable(sk))
104986 mask |= POLLOUT | POLLWRNORM;
104987 }
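
The tcp.c and tcp_input.c hunks are one paired barrier fix (the same change went upstream): tcp_poll() sets SOCK_NOSPACE and then re-checks writability, tcp_check_space() clears SOCK_QUEUE_SHRUNK and then checks SOCK_NOSPACE, and the smp_mb__after_atomic() on each side orders the flag update before the opposite check, closing the lost-wakeup window. A compressed sketch of the two sides:

        static void poll_side(struct sock *sk, struct socket *sock,
                              unsigned int *mask)
        {
                set_bit(SOCK_NOSPACE, &sock->flags);
                smp_mb__after_atomic(); /* pairs with the input side */
                if (sk_stream_is_writeable(sk)) /* race-breaker re-check */
                        *mask |= POLLOUT | POLLWRNORM;
        }

        static void input_side(struct sock *sk)
        {
                sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
                smp_mb__after_atomic(); /* pairs with tcp_poll() */
                if (sk->sk_socket &&
                    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
                        tcp_new_space(sk);
        }
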
104988diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
104989index f501ac04..0c5a1b2 100644
104990--- a/net/ipv4/tcp_input.c
104991+++ b/net/ipv4/tcp_input.c
104992@@ -767,7 +767,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
104993 * without any lock. We want to make sure compiler wont store
104994 * intermediate values in this location.
104995 */
104996- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
104997+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
104998 sk->sk_max_pacing_rate);
104999 }
105000
105001@@ -4541,7 +4541,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
105002 * simplifies code)
105003 */
105004 static void
105005-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
105006+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
105007 struct sk_buff *head, struct sk_buff *tail,
105008 u32 start, u32 end)
105009 {
105010@@ -4799,6 +4799,8 @@ static void tcp_check_space(struct sock *sk)
105011 {
105012 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
105013 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
105014+ /* pairs with tcp_poll() */
105015+ smp_mb__after_atomic();
105016 if (sk->sk_socket &&
105017 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
105018 tcp_new_space(sk);
105019@@ -5525,6 +5527,7 @@ discard:
105020 tcp_paws_reject(&tp->rx_opt, 0))
105021 goto discard_and_undo;
105022
105023+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
105024 if (th->syn) {
105025 /* We see SYN without ACK. It is attempt of
105026 * simultaneous connect with crossed SYNs.
105027@@ -5575,6 +5578,7 @@ discard:
105028 goto discard;
105029 #endif
105030 }
105031+#endif
105032 /* "fifth, if neither of the SYN or RST bits is set then
105033 * drop the segment and return."
105034 */
105035@@ -5621,7 +5625,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
105036 goto discard;
105037
105038 if (th->syn) {
105039- if (th->fin)
105040+ if (th->fin || th->urg || th->psh)
105041 goto discard;
105042 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
105043 return 1;
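
Two behavioural changes ride along in tcp_input.c: CONFIG_GRKERNSEC_NO_SIMULT_CONNECT compiles out the simultaneous-open (crossed SYN) handling in SYN_SENT, a rarely used corner that has been abused for firewall evasion; and in tcp_rcv_state_process() a SYN carrying FIN, URG or PSH is now dropped outright where previously only SYN+FIN was, rejecting the malformed segments typical of stealth scans. The hardened check as it reads after the patch:

        if (th->syn) {
                if (th->fin || th->urg || th->psh)      /* malformed SYN: drop */
                        goto discard;
                if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                        return 1;
        }
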
105044diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
105045index f1756ee..8908cb0 100644
105046--- a/net/ipv4/tcp_ipv4.c
105047+++ b/net/ipv4/tcp_ipv4.c
105048@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
105049 int sysctl_tcp_low_latency __read_mostly;
105050 EXPORT_SYMBOL(sysctl_tcp_low_latency);
105051
105052+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105053+extern int grsec_enable_blackhole;
105054+#endif
105055+
105056 #ifdef CONFIG_TCP_MD5SIG
105057 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
105058 __be32 daddr, __be32 saddr, const struct tcphdr *th);
105059@@ -1475,6 +1479,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
105060 return 0;
105061
105062 reset:
105063+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105064+ if (!grsec_enable_blackhole)
105065+#endif
105066 tcp_v4_send_reset(rsk, skb);
105067 discard:
105068 kfree_skb(skb);
105069@@ -1639,12 +1646,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
105070 TCP_SKB_CB(skb)->sacked = 0;
105071
105072 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
105073- if (!sk)
105074+ if (!sk) {
105075+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105076+ ret = 1;
105077+#endif
105078 goto no_tcp_socket;
105079-
105080+ }
105081 process:
105082- if (sk->sk_state == TCP_TIME_WAIT)
105083+ if (sk->sk_state == TCP_TIME_WAIT) {
105084+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105085+ ret = 2;
105086+#endif
105087 goto do_time_wait;
105088+ }
105089
105090 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
105091 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
105092@@ -1700,6 +1714,10 @@ csum_error:
105093 bad_packet:
105094 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
105095 } else {
105096+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105097+ if (!grsec_enable_blackhole || (ret == 1 &&
105098+ (skb->dev->flags & IFF_LOOPBACK)))
105099+#endif
105100 tcp_v4_send_reset(NULL, skb);
105101 }
105102
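
CONFIG_GRKERNSEC_BLACKHOLE, threaded through ip_input.c above and tcp_ipv4.c, tcp_minisocks.c, tcp_timer.c and udp.c around here, makes the stack silent toward unsolicited traffic: no TCP RST for closed or TIME-WAIT ports (the ret values 1 and 2 distinguish the lookup outcomes so only the right cases are suppressed), no ICMP unreachables for unknown UDP ports or protocols, and a reduced LAST-ACK retry budget via grsec_lastack_retries. Loopback is exempted so local diagnostics keep working. The recurring guard, as it appears in the udp.c hunk:

        #ifdef CONFIG_GRKERNSEC_BLACKHOLE
                if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
        #endif
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
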
105103diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
105104index dd11ac7..c0872da 100644
105105--- a/net/ipv4/tcp_minisocks.c
105106+++ b/net/ipv4/tcp_minisocks.c
105107@@ -27,6 +27,10 @@
105108 #include <net/inet_common.h>
105109 #include <net/xfrm.h>
105110
105111+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105112+extern int grsec_enable_blackhole;
105113+#endif
105114+
105115 int sysctl_tcp_syncookies __read_mostly = 1;
105116 EXPORT_SYMBOL(sysctl_tcp_syncookies);
105117
105118@@ -785,7 +789,10 @@ embryonic_reset:
105119 * avoid becoming vulnerable to outside attack aiming at
105120 * resetting legit local connections.
105121 */
105122- req->rsk_ops->send_reset(sk, skb);
105123+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105124+ if (!grsec_enable_blackhole)
105125+#endif
105126+ req->rsk_ops->send_reset(sk, skb);
105127 } else if (fastopen) { /* received a valid RST pkt */
105128 reqsk_fastopen_remove(sk, req, true);
105129 tcp_reset(sk);
105130diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
105131index ebf5ff5..4d1ff32 100644
105132--- a/net/ipv4/tcp_probe.c
105133+++ b/net/ipv4/tcp_probe.c
105134@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
105135 if (cnt + width >= len)
105136 break;
105137
105138- if (copy_to_user(buf + cnt, tbuf, width))
105139+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
105140 return -EFAULT;
105141 cnt += width;
105142 }
105143diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
105144index 0732b78..a82bdc6 100644
105145--- a/net/ipv4/tcp_timer.c
105146+++ b/net/ipv4/tcp_timer.c
105147@@ -22,6 +22,10 @@
105148 #include <linux/gfp.h>
105149 #include <net/tcp.h>
105150
105151+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105152+extern int grsec_lastack_retries;
105153+#endif
105154+
105155 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
105156 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
105157 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
105158@@ -194,6 +198,13 @@ static int tcp_write_timeout(struct sock *sk)
105159 }
105160 }
105161
105162+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105163+ if ((sk->sk_state == TCP_LAST_ACK) &&
105164+ (grsec_lastack_retries > 0) &&
105165+ (grsec_lastack_retries < retry_until))
105166+ retry_until = grsec_lastack_retries;
105167+#endif
105168+
105169 if (retransmits_timed_out(sk, retry_until,
105170 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
105171 /* Has it gone just too far? */
105172diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
105173index 97ef1f8b..e446c33 100644
105174--- a/net/ipv4/udp.c
105175+++ b/net/ipv4/udp.c
105176@@ -87,6 +87,7 @@
105177 #include <linux/types.h>
105178 #include <linux/fcntl.h>
105179 #include <linux/module.h>
105180+#include <linux/security.h>
105181 #include <linux/socket.h>
105182 #include <linux/sockios.h>
105183 #include <linux/igmp.h>
105184@@ -114,6 +115,10 @@
105185 #include <net/busy_poll.h>
105186 #include "udp_impl.h"
105187
105188+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105189+extern int grsec_enable_blackhole;
105190+#endif
105191+
105192 struct udp_table udp_table __read_mostly;
105193 EXPORT_SYMBOL(udp_table);
105194
105195@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
105196 return true;
105197 }
105198
105199+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
105200+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
105201+
105202 /*
105203 * This routine is called by the ICMP module when it gets some
105204 * sort of error condition. If err < 0 then the socket should
105205@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
105206 dport = usin->sin_port;
105207 if (dport == 0)
105208 return -EINVAL;
105209+
105210+ err = gr_search_udp_sendmsg(sk, usin);
105211+ if (err)
105212+ return err;
105213 } else {
105214 if (sk->sk_state != TCP_ESTABLISHED)
105215 return -EDESTADDRREQ;
105216+
105217+ err = gr_search_udp_sendmsg(sk, NULL);
105218+ if (err)
105219+ return err;
105220+
105221 daddr = inet->inet_daddr;
105222 dport = inet->inet_dport;
105223 /* Open fast path for connected socket.
105224@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
105225 IS_UDPLITE(sk));
105226 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
105227 IS_UDPLITE(sk));
105228- atomic_inc(&sk->sk_drops);
105229+ atomic_inc_unchecked(&sk->sk_drops);
105230 __skb_unlink(skb, rcvq);
105231 __skb_queue_tail(&list_kill, skb);
105232 }
105233@@ -1275,6 +1292,10 @@ try_again:
105234 if (!skb)
105235 goto out;
105236
105237+ err = gr_search_udp_recvmsg(sk, skb);
105238+ if (err)
105239+ goto out_free;
105240+
105241 ulen = skb->len - sizeof(struct udphdr);
105242 copied = len;
105243 if (copied > ulen)
105244@@ -1307,7 +1328,7 @@ try_again:
105245 if (unlikely(err)) {
105246 trace_kfree_skb(skb, udp_recvmsg);
105247 if (!peeked) {
105248- atomic_inc(&sk->sk_drops);
105249+ atomic_inc_unchecked(&sk->sk_drops);
105250 UDP_INC_STATS_USER(sock_net(sk),
105251 UDP_MIB_INERRORS, is_udplite);
105252 }
105253@@ -1605,7 +1626,7 @@ csum_error:
105254 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
105255 drop:
105256 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
105257- atomic_inc(&sk->sk_drops);
105258+ atomic_inc_unchecked(&sk->sk_drops);
105259 kfree_skb(skb);
105260 return -1;
105261 }
105262@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
105263 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
105264
105265 if (!skb1) {
105266- atomic_inc(&sk->sk_drops);
105267+ atomic_inc_unchecked(&sk->sk_drops);
105268 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
105269 IS_UDPLITE(sk));
105270 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
105271@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
105272 goto csum_error;
105273
105274 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
105275+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105276+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
105277+#endif
105278 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
105279
105280 /*
105281@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
105282 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
105283 0, sock_i_ino(sp),
105284 atomic_read(&sp->sk_refcnt), sp,
105285- atomic_read(&sp->sk_drops));
105286+ atomic_read_unchecked(&sp->sk_drops));
105287 }
105288
105289 int udp4_seq_show(struct seq_file *seq, void *v)
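Two independent changes run through udp.c: the gr_search_udp_sendmsg()/gr_search_udp_recvmsg() externs add policy veto points on the send and receive paths, and every sk_drops update moves to the _unchecked atomic variants. On a kernel without PaX's REFCOUNT instrumentation the _unchecked ops are plain atomics; the distinct type exists so wrap-tolerant statistics counters are exempt from overflow trapping. A C11 user-space model of that split:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Model of atomic_unchecked_t: same storage as atomic_t, separate type
     * so instrumentation can tell real refcounts from wrap-tolerant stats. */
    typedef struct { atomic_int counter; } atomic_unchecked_t;

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
    }

    static int atomic_read_unchecked(atomic_unchecked_t *v)
    {
        return atomic_load_explicit(&v->counter, memory_order_relaxed);
    }

    int main(void)
    {
        atomic_unchecked_t sk_drops = { 0 };
        atomic_inc_unchecked(&sk_drops);    /* one dropped datagram */
        printf("drops=%d\n", atomic_read_unchecked(&sk_drops));
        return 0;
    }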
105290diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
105291index 6156f68..d6ab46d 100644
105292--- a/net/ipv4/xfrm4_policy.c
105293+++ b/net/ipv4/xfrm4_policy.c
105294@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
105295 fl4->flowi4_tos = iph->tos;
105296 }
105297
105298-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
105299+static int xfrm4_garbage_collect(struct dst_ops *ops)
105300 {
105301 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
105302
105303- xfrm4_policy_afinfo.garbage_collect(net);
105304+ xfrm_garbage_collect_deferred(net);
105305 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
105306 }
105307
105308@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
105309
105310 static int __net_init xfrm4_net_init(struct net *net)
105311 {
105312- struct ctl_table *table;
105313+ ctl_table_no_const *table = NULL;
105314 struct ctl_table_header *hdr;
105315
105316- table = xfrm4_policy_table;
105317 if (!net_eq(net, &init_net)) {
105318- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
105319+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
105320 if (!table)
105321 goto err_alloc;
105322
105323 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
105324- }
105325-
105326- hdr = register_net_sysctl(net, "net/ipv4", table);
105327+ hdr = register_net_sysctl(net, "net/ipv4", table);
105328+ } else
105329+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
105330 if (!hdr)
105331 goto err_reg;
105332
105333@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
105334 return 0;
105335
105336 err_reg:
105337- if (!net_eq(net, &init_net))
105338- kfree(table);
105339+ kfree(table);
105340 err_alloc:
105341 return -ENOMEM;
105342 }
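This sysctl rework recurs in the nf_conntrack_reasm, reassembly, and xfrm6 hunks below: the template table stays const (and can be made read-only), only non-init namespaces receive a kmemdup'd mutable copy, and the error path frees unconditionally because table remains NULL for init_net and kfree(NULL) is a no-op. A compact user-space sketch of the control flow, with register_net_sysctl() faked:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ctl_table { const char *procname; void *data; };

    static const struct ctl_table policy_template[] = {
        { "gc_thresh", NULL },
        { NULL, NULL },
    };

    /* Stand-in for register_net_sysctl(); NULL signals failure. */
    static void *register_tbl(const struct ctl_table *t) { return (void *)t; }

    static int ns_sysctl_init(int is_init_ns, void *ns_gc_thresh)
    {
        struct ctl_table *table = NULL;     /* stays NULL for init_ns */
        void *hdr;

        if (!is_init_ns) {
            table = malloc(sizeof(policy_template));
            if (!table)
                goto err_alloc;
            memcpy(table, policy_template, sizeof(policy_template));
            table[0].data = ns_gc_thresh;   /* point the copy at per-ns state */
            hdr = register_tbl(table);      /* registration keeps the copy;
                                             * freed on unregister, omitted here */
        } else
            hdr = register_tbl(policy_template);
        if (!hdr)
            goto err_reg;
        return 0;

    err_reg:
        free(table);                        /* no-op when table is NULL */
    err_alloc:
        return -1;
    }

    int main(void)
    {
        int gc_thresh;
        printf("%d %d\n", ns_sysctl_init(1, NULL), ns_sysctl_init(0, &gc_thresh));
        return 0;
    }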
105343diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
105344index b603002..0de5c88 100644
105345--- a/net/ipv6/addrconf.c
105346+++ b/net/ipv6/addrconf.c
105347@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
105348 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
105349 .mtu6 = IPV6_MIN_MTU,
105350 .accept_ra = 1,
105351- .accept_redirects = 1,
105352+ .accept_redirects = 0,
105353 .autoconf = 1,
105354 .force_mld_version = 0,
105355 .mldv1_unsolicited_report_interval = 10 * HZ,
105356@@ -209,7 +209,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
105357 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
105358 .mtu6 = IPV6_MIN_MTU,
105359 .accept_ra = 1,
105360- .accept_redirects = 1,
105361+ .accept_redirects = 0,
105362 .autoconf = 1,
105363 .force_mld_version = 0,
105364 .mldv1_unsolicited_report_interval = 10 * HZ,
105365@@ -607,7 +607,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
105366 idx = 0;
105367 head = &net->dev_index_head[h];
105368 rcu_read_lock();
105369- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
105370+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
105371 net->dev_base_seq;
105372 hlist_for_each_entry_rcu(dev, head, index_hlist) {
105373 if (idx < s_idx)
105374@@ -2438,7 +2438,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
105375 p.iph.ihl = 5;
105376 p.iph.protocol = IPPROTO_IPV6;
105377 p.iph.ttl = 64;
105378- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
105379+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
105380
105381 if (ops->ndo_do_ioctl) {
105382 mm_segment_t oldfs = get_fs();
105383@@ -3587,16 +3587,23 @@ static const struct file_operations if6_fops = {
105384 .release = seq_release_net,
105385 };
105386
105387+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
105388+extern void unregister_ipv6_seq_ops_addr(void);
105389+
105390 static int __net_init if6_proc_net_init(struct net *net)
105391 {
105392- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
105393+ register_ipv6_seq_ops_addr(&if6_seq_ops);
105394+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
105395+ unregister_ipv6_seq_ops_addr();
105396 return -ENOMEM;
105397+ }
105398 return 0;
105399 }
105400
105401 static void __net_exit if6_proc_net_exit(struct net *net)
105402 {
105403 remove_proc_entry("if_inet6", net->proc_net);
105404+ unregister_ipv6_seq_ops_addr();
105405 }
105406
105407 static struct pernet_operations if6_proc_net_ops = {
105408@@ -4215,7 +4222,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
105409 s_ip_idx = ip_idx = cb->args[2];
105410
105411 rcu_read_lock();
105412- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
105413+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
105414 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
105415 idx = 0;
105416 head = &net->dev_index_head[h];
105417@@ -4864,7 +4871,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
105418 rt_genid_bump_ipv6(net);
105419 break;
105420 }
105421- atomic_inc(&net->ipv6.dev_addr_genid);
105422+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
105423 }
105424
105425 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
105426@@ -4884,7 +4891,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
105427 int *valp = ctl->data;
105428 int val = *valp;
105429 loff_t pos = *ppos;
105430- struct ctl_table lctl;
105431+ ctl_table_no_const lctl;
105432 int ret;
105433
105434 /*
105435@@ -4909,7 +4916,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
105436 {
105437 struct inet6_dev *idev = ctl->extra1;
105438 int min_mtu = IPV6_MIN_MTU;
105439- struct ctl_table lctl;
105440+ ctl_table_no_const lctl;
105441
105442 lctl = *ctl;
105443 lctl.extra1 = &min_mtu;
105444@@ -4984,7 +4991,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
105445 int *valp = ctl->data;
105446 int val = *valp;
105447 loff_t pos = *ppos;
105448- struct ctl_table lctl;
105449+ ctl_table_no_const lctl;
105450 int ret;
105451
105452 /*
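Besides constifying the ctl_table locals and switching the address-generation ids to _unchecked atomics, the addrconf hunks flip the IPv6 accept_redirects default from 1 to 0 in both devconf templates, so interfaces ignore ICMPv6 redirects unless an administrator re-enables them. A small reader for the resulting knob (standard procfs path, nothing patch-specific):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/net/ipv6/conf/default/accept_redirects", "r");
        int val = -1;

        if (f) {
            if (fscanf(f, "%d", &val) != 1)
                val = -1;
            fclose(f);
        }
        printf("accept_redirects default = %d\n", val);  /* 0 on a patched kernel */
        return 0;
    }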
105453diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
105454index e8c4400..a4cd5da 100644
105455--- a/net/ipv6/af_inet6.c
105456+++ b/net/ipv6/af_inet6.c
105457@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
105458 net->ipv6.sysctl.icmpv6_time = 1*HZ;
105459 net->ipv6.sysctl.flowlabel_consistency = 1;
105460 net->ipv6.sysctl.auto_flowlabels = 0;
105461- atomic_set(&net->ipv6.fib6_sernum, 1);
105462+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
105463
105464 err = ipv6_init_mibs(net);
105465 if (err)
105466diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
105467index ace8dac..bd6942d 100644
105468--- a/net/ipv6/datagram.c
105469+++ b/net/ipv6/datagram.c
105470@@ -957,5 +957,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
105471 0,
105472 sock_i_ino(sp),
105473 atomic_read(&sp->sk_refcnt), sp,
105474- atomic_read(&sp->sk_drops));
105475+ atomic_read_unchecked(&sp->sk_drops));
105476 }
105477diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
105478index a5e9519..16b7412 100644
105479--- a/net/ipv6/icmp.c
105480+++ b/net/ipv6/icmp.c
105481@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
105482
105483 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
105484 {
105485- struct ctl_table *table;
105486+ ctl_table_no_const *table;
105487
105488 table = kmemdup(ipv6_icmp_table_template,
105489 sizeof(ipv6_icmp_table_template),
105490diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
105491index 263ef41..88c7be8 100644
105492--- a/net/ipv6/ip6_fib.c
105493+++ b/net/ipv6/ip6_fib.c
105494@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
105495 int new, old;
105496
105497 do {
105498- old = atomic_read(&net->ipv6.fib6_sernum);
105499+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
105500 new = old < INT_MAX ? old + 1 : 1;
105501- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
105502+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
105503 old, new) != old);
105504 return new;
105505 }
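fib6_new_sernum() advances the serial with a compare-exchange retry loop that wraps INT_MAX back to 1, keeping the value positive and never handing out 0; the _unchecked variants once more just opt the counter out of overflow trapping. A C11 model of the loop:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int fib6_sernum = 1;

    /* Model of fib6_new_sernum(): strictly positive serial, INT_MAX wraps
     * to 1, retried on concurrent update. */
    static int new_sernum(void)
    {
        int old = atomic_load(&fib6_sernum);
        int new;

        do {
            new = old < INT_MAX ? old + 1 : 1;
        } while (!atomic_compare_exchange_weak(&fib6_sernum, &old, new));
        return new;
    }

    int main(void)
    {
        int a = new_sernum();
        int b = new_sernum();
        printf("%d %d\n", a, b);    /* 2 3 */
        return 0;
    }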
105506diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
105507index bc28b7d..a08feea 100644
105508--- a/net/ipv6/ip6_gre.c
105509+++ b/net/ipv6/ip6_gre.c
105510@@ -71,8 +71,8 @@ struct ip6gre_net {
105511 struct net_device *fb_tunnel_dev;
105512 };
105513
105514-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
105515-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
105516+static struct rtnl_link_ops ip6gre_link_ops;
105517+static struct rtnl_link_ops ip6gre_tap_ops;
105518 static int ip6gre_tunnel_init(struct net_device *dev);
105519 static void ip6gre_tunnel_setup(struct net_device *dev);
105520 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
105521@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
105522 }
105523
105524
105525-static struct inet6_protocol ip6gre_protocol __read_mostly = {
105526+static struct inet6_protocol ip6gre_protocol = {
105527 .handler = ip6gre_rcv,
105528 .err_handler = ip6gre_err,
105529 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
105530@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
105531 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
105532 };
105533
105534-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
105535+static struct rtnl_link_ops ip6gre_link_ops = {
105536 .kind = "ip6gre",
105537 .maxtype = IFLA_GRE_MAX,
105538 .policy = ip6gre_policy,
105539@@ -1665,7 +1665,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
105540 .get_link_net = ip6_tnl_get_link_net,
105541 };
105542
105543-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
105544+static struct rtnl_link_ops ip6gre_tap_ops = {
105545 .kind = "ip6gretap",
105546 .maxtype = IFLA_GRE_MAX,
105547 .policy = ip6gre_policy,
105548diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
105549index ddd94ec..b7cfefb 100644
105550--- a/net/ipv6/ip6_tunnel.c
105551+++ b/net/ipv6/ip6_tunnel.c
105552@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
105553
105554 static int ip6_tnl_dev_init(struct net_device *dev);
105555 static void ip6_tnl_dev_setup(struct net_device *dev);
105556-static struct rtnl_link_ops ip6_link_ops __read_mostly;
105557+static struct rtnl_link_ops ip6_link_ops;
105558
105559 static int ip6_tnl_net_id __read_mostly;
105560 struct ip6_tnl_net {
105561@@ -1780,7 +1780,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
105562 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
105563 };
105564
105565-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
105566+static struct rtnl_link_ops ip6_link_ops = {
105567 .kind = "ip6tnl",
105568 .maxtype = IFLA_IPTUN_MAX,
105569 .policy = ip6_tnl_policy,
105570diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
105571index 5fb9e21..92bf04b 100644
105572--- a/net/ipv6/ip6_vti.c
105573+++ b/net/ipv6/ip6_vti.c
105574@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
105575
105576 static int vti6_dev_init(struct net_device *dev);
105577 static void vti6_dev_setup(struct net_device *dev);
105578-static struct rtnl_link_ops vti6_link_ops __read_mostly;
105579+static struct rtnl_link_ops vti6_link_ops;
105580
105581 static int vti6_net_id __read_mostly;
105582 struct vti6_net {
105583@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
105584 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
105585 };
105586
105587-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
105588+static struct rtnl_link_ops vti6_link_ops = {
105589 .kind = "vti6",
105590 .maxtype = IFLA_VTI_MAX,
105591 .policy = vti6_policy,
105592diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
105593index 8d766d9..dcdfea7 100644
105594--- a/net/ipv6/ipv6_sockglue.c
105595+++ b/net/ipv6/ipv6_sockglue.c
105596@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
105597 if (sk->sk_type != SOCK_STREAM)
105598 return -ENOPROTOOPT;
105599
105600- msg.msg_control = optval;
105601+ msg.msg_control = (void __force_kernel *)optval;
105602 msg.msg_controllen = len;
105603 msg.msg_flags = flags;
105604
105605diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
105606index bb00c6f..16c90d7 100644
105607--- a/net/ipv6/netfilter/ip6_tables.c
105608+++ b/net/ipv6/netfilter/ip6_tables.c
105609@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
105610 #endif
105611
105612 static int get_info(struct net *net, void __user *user,
105613- const int *len, int compat)
105614+ int len, int compat)
105615 {
105616 char name[XT_TABLE_MAXNAMELEN];
105617 struct xt_table *t;
105618 int ret;
105619
105620- if (*len != sizeof(struct ip6t_getinfo)) {
105621- duprintf("length %u != %zu\n", *len,
105622+ if (len != sizeof(struct ip6t_getinfo)) {
105623+ duprintf("length %u != %zu\n", len,
105624 sizeof(struct ip6t_getinfo));
105625 return -EINVAL;
105626 }
105627@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
105628 info.size = private->size;
105629 strcpy(info.name, name);
105630
105631- if (copy_to_user(user, &info, *len) != 0)
105632+ if (copy_to_user(user, &info, len) != 0)
105633 ret = -EFAULT;
105634 else
105635 ret = 0;
105636@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
105637
105638 switch (cmd) {
105639 case IP6T_SO_GET_INFO:
105640- ret = get_info(sock_net(sk), user, len, 1);
105641+ ret = get_info(sock_net(sk), user, *len, 1);
105642 break;
105643 case IP6T_SO_GET_ENTRIES:
105644 ret = compat_get_entries(sock_net(sk), user, len);
105645@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
105646
105647 switch (cmd) {
105648 case IP6T_SO_GET_INFO:
105649- ret = get_info(sock_net(sk), user, len, 0);
105650+ ret = get_info(sock_net(sk), user, *len, 0);
105651 break;
105652
105653 case IP6T_SO_GET_ENTRIES:
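get_info() previously took const int *len and dereferenced it for both the size check and the final copy_to_user; passing the length by value pins it to one read, so check and use can never observe different numbers. The value appears to have lived only on the caller's kernel stack, so this reads less as a userspace double-fetch fix and more as making the read-once property structural. The discipline in miniature:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct getinfo { char name[32]; unsigned int size; };

    /* len arrives by value: the check below and the copy below it are
     * guaranteed to see the same number. */
    static int get_info(char *dst, int len)
    {
        struct getinfo info;

        if (len != (int)sizeof(info))
            return -EINVAL;
        memset(&info, 0, sizeof(info));
        memcpy(dst, &info, len);    /* len == sizeof(info), proven above */
        return 0;
    }

    int main(void)
    {
        char buf[sizeof(struct getinfo)];
        printf("%d %d\n", get_info(buf, 4), get_info(buf, (int)sizeof(buf)));
        return 0;
    }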
105654diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
105655index 6f187c8..34b367f 100644
105656--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
105657+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
105658@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
105659
105660 static int nf_ct_frag6_sysctl_register(struct net *net)
105661 {
105662- struct ctl_table *table;
105663+ ctl_table_no_const *table = NULL;
105664 struct ctl_table_header *hdr;
105665
105666- table = nf_ct_frag6_sysctl_table;
105667 if (!net_eq(net, &init_net)) {
105668- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
105669+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
105670 GFP_KERNEL);
105671 if (table == NULL)
105672 goto err_alloc;
105673@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
105674 table[2].data = &net->nf_frag.frags.high_thresh;
105675 table[2].extra1 = &net->nf_frag.frags.low_thresh;
105676 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
105677- }
105678-
105679- hdr = register_net_sysctl(net, "net/netfilter", table);
105680+ hdr = register_net_sysctl(net, "net/netfilter", table);
105681+ } else
105682+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
105683 if (hdr == NULL)
105684 goto err_reg;
105685
105686@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
105687 return 0;
105688
105689 err_reg:
105690- if (!net_eq(net, &init_net))
105691- kfree(table);
105692+ kfree(table);
105693 err_alloc:
105694 return -ENOMEM;
105695 }
105696diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
105697index a2dfff6..1e52e6d 100644
105698--- a/net/ipv6/ping.c
105699+++ b/net/ipv6/ping.c
105700@@ -241,6 +241,24 @@ static struct pernet_operations ping_v6_net_ops = {
105701 };
105702 #endif
105703
105704+static struct pingv6_ops real_pingv6_ops = {
105705+ .ipv6_recv_error = ipv6_recv_error,
105706+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
105707+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
105708+ .icmpv6_err_convert = icmpv6_err_convert,
105709+ .ipv6_icmp_error = ipv6_icmp_error,
105710+ .ipv6_chk_addr = ipv6_chk_addr,
105711+};
105712+
105713+static struct pingv6_ops dummy_pingv6_ops = {
105714+ .ipv6_recv_error = dummy_ipv6_recv_error,
105715+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
105716+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
105717+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
105718+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
105719+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
105720+};
105721+
105722 int __init pingv6_init(void)
105723 {
105724 #ifdef CONFIG_PROC_FS
105725@@ -248,13 +266,7 @@ int __init pingv6_init(void)
105726 if (ret)
105727 return ret;
105728 #endif
105729- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
105730- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
105731- pingv6_ops.ip6_datagram_recv_specific_ctl =
105732- ip6_datagram_recv_specific_ctl;
105733- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
105734- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
105735- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
105736+ pingv6_ops = &real_pingv6_ops;
105737 return inet6_register_protosw(&pingv6_protosw);
105738 }
105739
105740@@ -263,14 +275,9 @@ int __init pingv6_init(void)
105741 */
105742 void pingv6_exit(void)
105743 {
105744- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
105745- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
105746- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
105747- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
105748- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
105749- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
105750 #ifdef CONFIG_PROC_FS
105751 unregister_pernet_subsys(&ping_v6_net_ops);
105752 #endif
105753+ pingv6_ops = &dummy_pingv6_ops;
105754 inet6_unregister_protosw(&pingv6_protosw);
105755 }
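Rather than mutating fields of a writable global ops table at init and exit, the ping hunk defines two fully populated static tables and flips a single pointer between them; with constification the tables can live in read-only memory and the swap is one store. The shape, reduced to user space:

    #include <stdio.h>

    struct ops { void (*handle)(void); };

    static void real_handle(void)  { puts("real");  }
    static void dummy_handle(void) { puts("dummy"); }

    /* Both tables are const: only the selector pointer ever changes. */
    static const struct ops real_ops  = { real_handle };
    static const struct ops dummy_ops = { dummy_handle };
    static const struct ops *cur_ops  = &dummy_ops;

    int main(void)
    {
        cur_ops = &real_ops;    /* init: one pointer store */
        cur_ops->handle();
        cur_ops = &dummy_ops;   /* exit: swap back */
        cur_ops->handle();
        return 0;
    }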
105756diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
105757index 679253d0..70b653c 100644
105758--- a/net/ipv6/proc.c
105759+++ b/net/ipv6/proc.c
105760@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
105761 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
105762 goto proc_snmp6_fail;
105763
105764- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
105765+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
105766 if (!net->mib.proc_net_devsnmp6)
105767 goto proc_dev_snmp6_fail;
105768 return 0;
105769diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
105770index dae7f1a..783b20d 100644
105771--- a/net/ipv6/raw.c
105772+++ b/net/ipv6/raw.c
105773@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
105774 {
105775 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
105776 skb_checksum_complete(skb)) {
105777- atomic_inc(&sk->sk_drops);
105778+ atomic_inc_unchecked(&sk->sk_drops);
105779 kfree_skb(skb);
105780 return NET_RX_DROP;
105781 }
105782@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
105783 struct raw6_sock *rp = raw6_sk(sk);
105784
105785 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
105786- atomic_inc(&sk->sk_drops);
105787+ atomic_inc_unchecked(&sk->sk_drops);
105788 kfree_skb(skb);
105789 return NET_RX_DROP;
105790 }
105791@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
105792
105793 if (inet->hdrincl) {
105794 if (skb_checksum_complete(skb)) {
105795- atomic_inc(&sk->sk_drops);
105796+ atomic_inc_unchecked(&sk->sk_drops);
105797 kfree_skb(skb);
105798 return NET_RX_DROP;
105799 }
105800@@ -609,7 +609,7 @@ out:
105801 return err;
105802 }
105803
105804-static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
105805+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, unsigned int length,
105806 struct flowi6 *fl6, struct dst_entry **dstp,
105807 unsigned int flags)
105808 {
105809@@ -915,12 +915,15 @@ do_confirm:
105810 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
105811 char __user *optval, int optlen)
105812 {
105813+ struct icmp6_filter filter;
105814+
105815 switch (optname) {
105816 case ICMPV6_FILTER:
105817 if (optlen > sizeof(struct icmp6_filter))
105818 optlen = sizeof(struct icmp6_filter);
105819- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
105820+ if (copy_from_user(&filter, optval, optlen))
105821 return -EFAULT;
105822+ raw6_sk(sk)->filter = filter;
105823 return 0;
105824 default:
105825 return -ENOPROTOOPT;
105826@@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
105827 char __user *optval, int __user *optlen)
105828 {
105829 int len;
105830+ struct icmp6_filter filter;
105831
105832 switch (optname) {
105833 case ICMPV6_FILTER:
105834@@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
105835 len = sizeof(struct icmp6_filter);
105836 if (put_user(len, optlen))
105837 return -EFAULT;
105838- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
105839+ filter = raw6_sk(sk)->filter;
105840+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
105841 return -EFAULT;
105842 return 0;
105843 default:
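Both ICMPv6 filter paths in raw.c now stage through a stack copy: setsockopt lands copy_from_user in a local struct before committing it to the socket in one assignment, and getsockopt bounds len against the local before copy_to_user. A faulting or oversized user copy therefore can never leave the socket's embedded filter half-written or over-read it. A sketch of the set side:

    #include <errno.h>
    #include <string.h>

    struct icmp6_filter { unsigned int data[8]; };
    struct raw6_sock    { struct icmp6_filter filter; };

    /* Stage through a local; commit the whole struct only on success. */
    static int set_filter(struct raw6_sock *rp, const void *optval, size_t optlen)
    {
        struct icmp6_filter filter = { { 0 } };

        if (optlen > sizeof(filter))
            optlen = sizeof(filter);
        if (!optval)                    /* stand-in for copy_from_user failing */
            return -EFAULT;
        memcpy(&filter, optval, optlen);
        rp->filter = filter;            /* single whole-struct commit */
        return 0;
    }

    int main(void)
    {
        struct raw6_sock sk = { { { 0 } } };
        unsigned int blob[8] = { 0xff };

        return set_filter(&sk, blob, sizeof(blob));
    }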
105844diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
105845index d7d70e6..bd5e9fc 100644
105846--- a/net/ipv6/reassembly.c
105847+++ b/net/ipv6/reassembly.c
105848@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
105849
105850 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
105851 {
105852- struct ctl_table *table;
105853+ ctl_table_no_const *table = NULL;
105854 struct ctl_table_header *hdr;
105855
105856- table = ip6_frags_ns_ctl_table;
105857 if (!net_eq(net, &init_net)) {
105858- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
105859+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
105860 if (table == NULL)
105861 goto err_alloc;
105862
105863@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
105864 /* Don't export sysctls to unprivileged users */
105865 if (net->user_ns != &init_user_ns)
105866 table[0].procname = NULL;
105867- }
105868+ hdr = register_net_sysctl(net, "net/ipv6", table);
105869+ } else
105870+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
105871
105872- hdr = register_net_sysctl(net, "net/ipv6", table);
105873 if (hdr == NULL)
105874 goto err_reg;
105875
105876@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
105877 return 0;
105878
105879 err_reg:
105880- if (!net_eq(net, &init_net))
105881- kfree(table);
105882+ kfree(table);
105883 err_alloc:
105884 return -ENOMEM;
105885 }
105886diff --git a/net/ipv6/route.c b/net/ipv6/route.c
105887index 4688bd4..584453d 100644
105888--- a/net/ipv6/route.c
105889+++ b/net/ipv6/route.c
105890@@ -3029,7 +3029,7 @@ struct ctl_table ipv6_route_table_template[] = {
105891
105892 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
105893 {
105894- struct ctl_table *table;
105895+ ctl_table_no_const *table;
105896
105897 table = kmemdup(ipv6_route_table_template,
105898 sizeof(ipv6_route_table_template),
105899diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
105900index e4cbd57..02b1aaa 100644
105901--- a/net/ipv6/sit.c
105902+++ b/net/ipv6/sit.c
105903@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
105904 static void ipip6_dev_free(struct net_device *dev);
105905 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
105906 __be32 *v4dst);
105907-static struct rtnl_link_ops sit_link_ops __read_mostly;
105908+static struct rtnl_link_ops sit_link_ops;
105909
105910 static int sit_net_id __read_mostly;
105911 struct sit_net {
105912@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
105913 unregister_netdevice_queue(dev, head);
105914 }
105915
105916-static struct rtnl_link_ops sit_link_ops __read_mostly = {
105917+static struct rtnl_link_ops sit_link_ops = {
105918 .kind = "sit",
105919 .maxtype = IFLA_IPTUN_MAX,
105920 .policy = ipip6_policy,
105921diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
105922index c5c10fa..2577d51 100644
105923--- a/net/ipv6/sysctl_net_ipv6.c
105924+++ b/net/ipv6/sysctl_net_ipv6.c
105925@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
105926
105927 static int __net_init ipv6_sysctl_net_init(struct net *net)
105928 {
105929- struct ctl_table *ipv6_table;
105930+ ctl_table_no_const *ipv6_table;
105931 struct ctl_table *ipv6_route_table;
105932 struct ctl_table *ipv6_icmp_table;
105933 int err;
105934diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
105935index 1f5e622..8387d90 100644
105936--- a/net/ipv6/tcp_ipv6.c
105937+++ b/net/ipv6/tcp_ipv6.c
105938@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
105939 }
105940 }
105941
105942+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105943+extern int grsec_enable_blackhole;
105944+#endif
105945+
105946 static void tcp_v6_hash(struct sock *sk)
105947 {
105948 if (sk->sk_state != TCP_CLOSE) {
105949@@ -1345,6 +1349,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
105950 return 0;
105951
105952 reset:
105953+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105954+ if (!grsec_enable_blackhole)
105955+#endif
105956 tcp_v6_send_reset(sk, skb);
105957 discard:
105958 if (opt_skb)
105959@@ -1454,12 +1461,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
105960
105961 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
105962 inet6_iif(skb));
105963- if (!sk)
105964+ if (!sk) {
105965+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105966+ ret = 1;
105967+#endif
105968 goto no_tcp_socket;
105969+ }
105970
105971 process:
105972- if (sk->sk_state == TCP_TIME_WAIT)
105973+ if (sk->sk_state == TCP_TIME_WAIT) {
105974+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105975+ ret = 2;
105976+#endif
105977 goto do_time_wait;
105978+ }
105979
105980 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
105981 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
105982@@ -1510,6 +1525,10 @@ csum_error:
105983 bad_packet:
105984 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
105985 } else {
105986+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105987+ if (!grsec_enable_blackhole || (ret == 1 &&
105988+ (skb->dev->flags & IFF_LOOPBACK)))
105989+#endif
105990 tcp_v6_send_reset(NULL, skb);
105991 }
105992
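The IPv6 TCP receive path mirrors the IPv4 hunks, with ret doing double duty as a tag: 1 marks a segment that matched no socket, 2 marks TIME-WAIT. The final check can then still answer loopback probes that found nothing while blackholing everything else. The predicate in isolation:

    #include <stdio.h>

    /* Mirrors the rcv-path decision: with blackholing on, reset only
     * loopback segments that matched no socket at all (ret == 1). */
    static int should_send_reset(int blackhole_on, int ret, int is_loopback)
    {
        return !blackhole_on || (ret == 1 && is_loopback);
    }

    int main(void)
    {
        printf("%d\n", should_send_reset(1, 1, 1)); /* 1: loopback, no socket */
        printf("%d\n", should_send_reset(1, 2, 1)); /* 0: TIME-WAIT, suppressed */
        return 0;
    }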
105993diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
105994index d048d46..bf141c3 100644
105995--- a/net/ipv6/udp.c
105996+++ b/net/ipv6/udp.c
105997@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
105998 udp_ipv6_hash_secret + net_hash_mix(net));
105999 }
106000
106001+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
106002+extern int grsec_enable_blackhole;
106003+#endif
106004+
106005 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
106006 {
106007 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
106008@@ -448,7 +452,7 @@ try_again:
106009 if (unlikely(err)) {
106010 trace_kfree_skb(skb, udpv6_recvmsg);
106011 if (!peeked) {
106012- atomic_inc(&sk->sk_drops);
106013+ atomic_inc_unchecked(&sk->sk_drops);
106014 if (is_udp4)
106015 UDP_INC_STATS_USER(sock_net(sk),
106016 UDP_MIB_INERRORS,
106017@@ -714,7 +718,7 @@ csum_error:
106018 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
106019 drop:
106020 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
106021- atomic_inc(&sk->sk_drops);
106022+ atomic_inc_unchecked(&sk->sk_drops);
106023 kfree_skb(skb);
106024 return -1;
106025 }
106026@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
106027 if (likely(skb1 == NULL))
106028 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
106029 if (!skb1) {
106030- atomic_inc(&sk->sk_drops);
106031+ atomic_inc_unchecked(&sk->sk_drops);
106032 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
106033 IS_UDPLITE(sk));
106034 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
106035@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
106036 goto csum_error;
106037
106038 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
106039+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
106040+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
106041+#endif
106042 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
106043
106044 kfree_skb(skb);
106045diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
106046index 8d2d01b4..313511e 100644
106047--- a/net/ipv6/xfrm6_policy.c
106048+++ b/net/ipv6/xfrm6_policy.c
106049@@ -224,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
106050 }
106051 }
106052
106053-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
106054+static int xfrm6_garbage_collect(struct dst_ops *ops)
106055 {
106056 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
106057
106058- xfrm6_policy_afinfo.garbage_collect(net);
106059+ xfrm_garbage_collect_deferred(net);
106060 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
106061 }
106062
106063@@ -341,19 +341,19 @@ static struct ctl_table xfrm6_policy_table[] = {
106064
106065 static int __net_init xfrm6_net_init(struct net *net)
106066 {
106067- struct ctl_table *table;
106068+ ctl_table_no_const *table = NULL;
106069 struct ctl_table_header *hdr;
106070
106071- table = xfrm6_policy_table;
106072 if (!net_eq(net, &init_net)) {
106073- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
106074+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
106075 if (!table)
106076 goto err_alloc;
106077
106078 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
106079- }
106080+ hdr = register_net_sysctl(net, "net/ipv6", table);
106081+ } else
106082+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
106083
106084- hdr = register_net_sysctl(net, "net/ipv6", table);
106085 if (!hdr)
106086 goto err_reg;
106087
106088@@ -361,8 +361,7 @@ static int __net_init xfrm6_net_init(struct net *net)
106089 return 0;
106090
106091 err_reg:
106092- if (!net_eq(net, &init_net))
106093- kfree(table);
106094+ kfree(table);
106095 err_alloc:
106096 return -ENOMEM;
106097 }
106098diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
106099index c1d247e..9e5949d 100644
106100--- a/net/ipx/ipx_proc.c
106101+++ b/net/ipx/ipx_proc.c
106102@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
106103 struct proc_dir_entry *p;
106104 int rc = -ENOMEM;
106105
106106- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
106107+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
106108
106109 if (!ipx_proc_dir)
106110 goto out;
106111diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
106112index 683346d..cb0e12d 100644
106113--- a/net/irda/ircomm/ircomm_tty.c
106114+++ b/net/irda/ircomm/ircomm_tty.c
106115@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
106116 add_wait_queue(&port->open_wait, &wait);
106117
106118 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
106119- __FILE__, __LINE__, tty->driver->name, port->count);
106120+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
106121
106122 spin_lock_irqsave(&port->lock, flags);
106123- port->count--;
106124+ atomic_dec(&port->count);
106125 port->blocked_open++;
106126 spin_unlock_irqrestore(&port->lock, flags);
106127
106128@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
106129 }
106130
106131 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
106132- __FILE__, __LINE__, tty->driver->name, port->count);
106133+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
106134
106135 schedule();
106136 }
106137@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
106138
106139 spin_lock_irqsave(&port->lock, flags);
106140 if (!tty_hung_up_p(filp))
106141- port->count++;
106142+ atomic_inc(&port->count);
106143 port->blocked_open--;
106144 spin_unlock_irqrestore(&port->lock, flags);
106145
106146 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
106147- __FILE__, __LINE__, tty->driver->name, port->count);
106148+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
106149
106150 if (!retval)
106151 port->flags |= ASYNC_NORMAL_ACTIVE;
106152@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
106153
106154 /* ++ is not atomic, so this should be protected - Jean II */
106155 spin_lock_irqsave(&self->port.lock, flags);
106156- self->port.count++;
106157+ atomic_inc(&self->port.count);
106158 spin_unlock_irqrestore(&self->port.lock, flags);
106159 tty_port_tty_set(&self->port, tty);
106160
106161 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
106162- self->line, self->port.count);
106163+ self->line, atomic_read(&self->port.count));
106164
106165 /* Not really used by us, but lets do it anyway */
106166 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
106167@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
106168 tty_kref_put(port->tty);
106169 }
106170 port->tty = NULL;
106171- port->count = 0;
106172+ atomic_set(&port->count, 0);
106173 spin_unlock_irqrestore(&port->lock, flags);
106174
106175 wake_up_interruptible(&port->open_wait);
106176@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
106177 seq_putc(m, '\n');
106178
106179 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
106180- seq_printf(m, "Open count: %d\n", self->port.count);
106181+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
106182 seq_printf(m, "Max data size: %d\n", self->max_data_size);
106183 seq_printf(m, "Max header size: %d\n", self->max_header_size);
106184
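The ircomm_tty conversion turns the tty_port open count into an atomic: reads become atomic_read(), the ++/-- pairs become atomic_inc()/atomic_dec(), and hangup resets with atomic_set(). The bookkeeping, modeled with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;   /* model of the converted port->count */

    static void tty_open(void)   { atomic_fetch_add(&port_count, 1); }
    static void tty_close(void)  { atomic_fetch_sub(&port_count, 1); }
    static void tty_hangup(void) { atomic_store(&port_count, 0); }

    int main(void)
    {
        tty_open(); tty_open(); tty_close();
        printf("open count: %d\n", atomic_load(&port_count));   /* 1 */
        tty_hangup();
        printf("after hangup: %d\n", atomic_load(&port_count)); /* 0 */
        return 0;
    }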
106185diff --git a/net/irda/irproc.c b/net/irda/irproc.c
106186index b9ac598..f88cc56 100644
106187--- a/net/irda/irproc.c
106188+++ b/net/irda/irproc.c
106189@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
106190 {
106191 int i;
106192
106193- proc_irda = proc_mkdir("irda", init_net.proc_net);
106194+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
106195 if (proc_irda == NULL)
106196 return;
106197
106198diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
106199index 53d9311..cbaf99f 100644
106200--- a/net/iucv/af_iucv.c
106201+++ b/net/iucv/af_iucv.c
106202@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
106203 {
106204 char name[12];
106205
106206- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
106207+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
106208 while (__iucv_get_sock_by_name(name)) {
106209 sprintf(name, "%08x",
106210- atomic_inc_return(&iucv_sk_list.autobind_name));
106211+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
106212 }
106213 memcpy(iucv->src_name, name, 8);
106214 }
106215diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
106216index 2a6a1fd..6c112b0 100644
106217--- a/net/iucv/iucv.c
106218+++ b/net/iucv/iucv.c
106219@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
106220 return NOTIFY_OK;
106221 }
106222
106223-static struct notifier_block __refdata iucv_cpu_notifier = {
106224+static struct notifier_block iucv_cpu_notifier = {
106225 .notifier_call = iucv_cpu_notify,
106226 };
106227
106228diff --git a/net/key/af_key.c b/net/key/af_key.c
106229index f8ac939..1e189bf 100644
106230--- a/net/key/af_key.c
106231+++ b/net/key/af_key.c
106232@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
106233 static u32 get_acqseq(void)
106234 {
106235 u32 res;
106236- static atomic_t acqseq;
106237+ static atomic_unchecked_t acqseq;
106238
106239 do {
106240- res = atomic_inc_return(&acqseq);
106241+ res = atomic_inc_return_unchecked(&acqseq);
106242 } while (!res);
106243 return res;
106244 }
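get_acqseq() keeps its original contract, a u32 that is never 0, because PF_KEY acquire messages use 0 as "no sequence"; the do/while simply increments again on the one-in-2^32 wrap. Only the counter's type changes, exempting the increment from overflow trapping. A C11 model, with the wrap forced for demonstration:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint32_t acqseq;

    /* Never returns 0: a wrap to 0 just triggers one more increment. */
    static uint32_t get_acqseq(void)
    {
        uint32_t res;

        do {
            res = atomic_fetch_add(&acqseq, 1) + 1;
        } while (!res);
        return res;
    }

    int main(void)
    {
        atomic_store(&acqseq, UINT32_MAX - 1);
        uint32_t a = get_acqseq();
        uint32_t b = get_acqseq();
        uint32_t c = get_acqseq();
        printf("%u %u %u\n", a, b, c);  /* 4294967295 1 2: zero is skipped */
        return 0;
    }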
106245diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
106246index 781b3a2..73a7434 100644
106247--- a/net/l2tp/l2tp_eth.c
106248+++ b/net/l2tp/l2tp_eth.c
106249@@ -42,12 +42,12 @@ struct l2tp_eth {
106250 struct sock *tunnel_sock;
106251 struct l2tp_session *session;
106252 struct list_head list;
106253- atomic_long_t tx_bytes;
106254- atomic_long_t tx_packets;
106255- atomic_long_t tx_dropped;
106256- atomic_long_t rx_bytes;
106257- atomic_long_t rx_packets;
106258- atomic_long_t rx_errors;
106259+ atomic_long_unchecked_t tx_bytes;
106260+ atomic_long_unchecked_t tx_packets;
106261+ atomic_long_unchecked_t tx_dropped;
106262+ atomic_long_unchecked_t rx_bytes;
106263+ atomic_long_unchecked_t rx_packets;
106264+ atomic_long_unchecked_t rx_errors;
106265 };
106266
106267 /* via l2tp_session_priv() */
106268@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
106269 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
106270
106271 if (likely(ret == NET_XMIT_SUCCESS)) {
106272- atomic_long_add(len, &priv->tx_bytes);
106273- atomic_long_inc(&priv->tx_packets);
106274+ atomic_long_add_unchecked(len, &priv->tx_bytes);
106275+ atomic_long_inc_unchecked(&priv->tx_packets);
106276 } else {
106277- atomic_long_inc(&priv->tx_dropped);
106278+ atomic_long_inc_unchecked(&priv->tx_dropped);
106279 }
106280 return NETDEV_TX_OK;
106281 }
106282@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
106283 {
106284 struct l2tp_eth *priv = netdev_priv(dev);
106285
106286- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
106287- stats->tx_packets = atomic_long_read(&priv->tx_packets);
106288- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
106289- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
106290- stats->rx_packets = atomic_long_read(&priv->rx_packets);
106291- stats->rx_errors = atomic_long_read(&priv->rx_errors);
106292+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
106293+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
106294+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
106295+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
106296+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
106297+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
106298 return stats;
106299 }
106300
106301@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
106302 nf_reset(skb);
106303
106304 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
106305- atomic_long_inc(&priv->rx_packets);
106306- atomic_long_add(data_len, &priv->rx_bytes);
106307+ atomic_long_inc_unchecked(&priv->rx_packets);
106308+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
106309 } else {
106310- atomic_long_inc(&priv->rx_errors);
106311+ atomic_long_inc_unchecked(&priv->rx_errors);
106312 }
106313 return;
106314
106315 error:
106316- atomic_long_inc(&priv->rx_errors);
106317+ atomic_long_inc_unchecked(&priv->rx_errors);
106318 kfree_skb(skb);
106319 }
106320
106321diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
106322index 05dfc8aa..df6cfd7 100644
106323--- a/net/l2tp/l2tp_ip.c
106324+++ b/net/l2tp/l2tp_ip.c
106325@@ -608,7 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
106326 .ops = &l2tp_ip_ops,
106327 };
106328
106329-static struct net_protocol l2tp_ip_protocol __read_mostly = {
106330+static const struct net_protocol l2tp_ip_protocol = {
106331 .handler = l2tp_ip_recv,
106332 .netns_ok = 1,
106333 };
106334diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
106335index 8611f1b..bc60a2d 100644
106336--- a/net/l2tp/l2tp_ip6.c
106337+++ b/net/l2tp/l2tp_ip6.c
106338@@ -757,7 +757,7 @@ static struct inet_protosw l2tp_ip6_protosw = {
106339 .ops = &l2tp_ip6_ops,
106340 };
106341
106342-static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
106343+static const struct inet6_protocol l2tp_ip6_protocol = {
106344 .handler = l2tp_ip6_recv,
106345 };
106346
106347diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
106348index 1a3c7e0..80f8b0c 100644
106349--- a/net/llc/llc_proc.c
106350+++ b/net/llc/llc_proc.c
106351@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
106352 int rc = -ENOMEM;
106353 struct proc_dir_entry *p;
106354
106355- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
106356+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
106357 if (!llc_proc_dir)
106358 goto out;
106359
106360diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
106361index dd4ff36..3462997 100644
106362--- a/net/mac80211/cfg.c
106363+++ b/net/mac80211/cfg.c
106364@@ -581,7 +581,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
106365 ret = ieee80211_vif_use_channel(sdata, chandef,
106366 IEEE80211_CHANCTX_EXCLUSIVE);
106367 }
106368- } else if (local->open_count == local->monitors) {
106369+ } else if (local_read(&local->open_count) == local->monitors) {
106370 local->_oper_chandef = *chandef;
106371 ieee80211_hw_config(local, 0);
106372 }
106373@@ -3468,7 +3468,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
106374 else
106375 local->probe_req_reg--;
106376
106377- if (!local->open_count)
106378+ if (!local_read(&local->open_count))
106379 break;
106380
106381 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
106382@@ -3603,8 +3603,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
106383 if (chanctx_conf) {
106384 *chandef = sdata->vif.bss_conf.chandef;
106385 ret = 0;
106386- } else if (local->open_count > 0 &&
106387- local->open_count == local->monitors &&
106388+ } else if (local_read(&local->open_count) > 0 &&
106389+ local_read(&local->open_count) == local->monitors &&
106390 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
106391 if (local->use_chanctx)
106392 *chandef = local->monitor_chandef;
106393diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
106394index 8d53d65..a4ac794 100644
106395--- a/net/mac80211/ieee80211_i.h
106396+++ b/net/mac80211/ieee80211_i.h
106397@@ -29,6 +29,7 @@
106398 #include <net/ieee80211_radiotap.h>
106399 #include <net/cfg80211.h>
106400 #include <net/mac80211.h>
106401+#include <asm/local.h>
106402 #include "key.h"
106403 #include "sta_info.h"
106404 #include "debug.h"
106405@@ -1126,7 +1127,7 @@ struct ieee80211_local {
106406 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
106407 spinlock_t queue_stop_reason_lock;
106408
106409- int open_count;
106410+ local_t open_count;
106411 int monitors, cooked_mntrs;
106412 /* number of interfaces with corresponding FIF_ flags */
106413 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
106414diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
106415index 81a2751..c06a026 100644
106416--- a/net/mac80211/iface.c
106417+++ b/net/mac80211/iface.c
106418@@ -544,7 +544,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
106419 break;
106420 }
106421
106422- if (local->open_count == 0) {
106423+ if (local_read(&local->open_count) == 0) {
106424 res = drv_start(local);
106425 if (res)
106426 goto err_del_bss;
106427@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
106428 res = drv_add_interface(local, sdata);
106429 if (res)
106430 goto err_stop;
106431- } else if (local->monitors == 0 && local->open_count == 0) {
106432+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
106433 res = ieee80211_add_virtual_monitor(local);
106434 if (res)
106435 goto err_stop;
106436@@ -701,7 +701,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
106437 atomic_inc(&local->iff_promiscs);
106438
106439 if (coming_up)
106440- local->open_count++;
106441+ local_inc(&local->open_count);
106442
106443 if (hw_reconf_flags)
106444 ieee80211_hw_config(local, hw_reconf_flags);
106445@@ -739,7 +739,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
106446 err_del_interface:
106447 drv_remove_interface(local, sdata);
106448 err_stop:
106449- if (!local->open_count)
106450+ if (!local_read(&local->open_count))
106451 drv_stop(local);
106452 err_del_bss:
106453 sdata->bss = NULL;
106454@@ -907,7 +907,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
106455 }
106456
106457 if (going_down)
106458- local->open_count--;
106459+ local_dec(&local->open_count);
106460
106461 switch (sdata->vif.type) {
106462 case NL80211_IFTYPE_AP_VLAN:
106463@@ -969,7 +969,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
106464 }
106465 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
106466
106467- if (local->open_count == 0)
106468+ if (local_read(&local->open_count) == 0)
106469 ieee80211_clear_tx_pending(local);
106470
106471 /*
106472@@ -1012,7 +1012,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
106473 if (cancel_scan)
106474 flush_delayed_work(&local->scan_work);
106475
106476- if (local->open_count == 0) {
106477+ if (local_read(&local->open_count) == 0) {
106478 ieee80211_stop_device(local);
106479
106480 /* no reconfiguring after stop! */
106481@@ -1023,7 +1023,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
106482 ieee80211_configure_filter(local);
106483 ieee80211_hw_config(local, hw_reconf_flags);
106484
106485- if (local->monitors == local->open_count)
106486+ if (local->monitors == local_read(&local->open_count))
106487 ieee80211_add_virtual_monitor(local);
106488 }
106489
106490diff --git a/net/mac80211/main.c b/net/mac80211/main.c
106491index 5e09d35..e2fdbe2 100644
106492--- a/net/mac80211/main.c
106493+++ b/net/mac80211/main.c
106494@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
106495 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
106496 IEEE80211_CONF_CHANGE_POWER);
106497
106498- if (changed && local->open_count) {
106499+ if (changed && local_read(&local->open_count)) {
106500 ret = drv_config(local, changed);
106501 /*
106502 * Goal:
106503diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
106504index ca405b6..6cc8bee 100644
106505--- a/net/mac80211/pm.c
106506+++ b/net/mac80211/pm.c
106507@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
106508 struct ieee80211_sub_if_data *sdata;
106509 struct sta_info *sta;
106510
106511- if (!local->open_count)
106512+ if (!local_read(&local->open_count))
106513 goto suspend;
106514
106515 ieee80211_scan_cancel(local);
106516@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
106517 cancel_work_sync(&local->dynamic_ps_enable_work);
106518 del_timer_sync(&local->dynamic_ps_timer);
106519
106520- local->wowlan = wowlan && local->open_count;
106521+ local->wowlan = wowlan && local_read(&local->open_count);
106522 if (local->wowlan) {
106523 int err = drv_suspend(local, wowlan);
106524 if (err < 0) {
106525@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
106526 WARN_ON(!list_empty(&local->chanctx_list));
106527
106528 /* stop hardware - this must stop RX */
106529- if (local->open_count)
106530+ if (local_read(&local->open_count))
106531 ieee80211_stop_device(local);
106532
106533 suspend:
106534diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
106535index d53355b..21f583a 100644
106536--- a/net/mac80211/rate.c
106537+++ b/net/mac80211/rate.c
106538@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
106539
106540 ASSERT_RTNL();
106541
106542- if (local->open_count)
106543+ if (local_read(&local->open_count))
106544 return -EBUSY;
106545
106546 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
106547diff --git a/net/mac80211/util.c b/net/mac80211/util.c
106548index 747bdcf..eb2b981 100644
106549--- a/net/mac80211/util.c
106550+++ b/net/mac80211/util.c
106551@@ -1741,7 +1741,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
106552 bool sched_scan_stopped = false;
106553
106554 /* nothing to do if HW shouldn't run */
106555- if (!local->open_count)
106556+ if (!local_read(&local->open_count))
106557 goto wake_up;
106558
106559 #ifdef CONFIG_PM
106560@@ -1993,7 +1993,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
106561 local->in_reconfig = false;
106562 barrier();
106563
106564- if (local->monitors == local->open_count && local->monitors > 0)
106565+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
106566 ieee80211_add_virtual_monitor(local);
106567
106568 /*
106569@@ -2048,7 +2048,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
106570 * If this is for hw restart things are still running.
106571 * We may want to change that later, however.
106572 */
106573- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
106574+ if (local_read(&local->open_count) && (!local->suspended || reconfig_due_to_wowlan))
106575 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
106576
106577 if (!local->suspended)
106578@@ -2072,7 +2072,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
106579 flush_delayed_work(&local->scan_work);
106580 }
106581
106582- if (local->open_count && !reconfig_due_to_wowlan)
106583+ if (local_read(&local->open_count) && !reconfig_due_to_wowlan)
106584 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
106585
106586 list_for_each_entry(sdata, &local->interfaces, list) {
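Across mac80211, open_count becomes a local_t (asm/local.h) with local_read/local_inc/local_dec accessors. The count is already manipulated under RTNL, so the conversion reads as conforming to the patch's counter-type policy rather than fixing a race; what matters is that every access now goes through one accessor family. A single-threaded stand-in that keeps the discipline visible (not the real asm/local.h):

    #include <stdio.h>

    typedef struct { long v; } local_t;     /* stand-in, not asm/local.h */
    #define local_read(l)  ((l)->v)
    #define local_inc(l)   ((l)->v++)
    #define local_dec(l)   ((l)->v--)

    static local_t open_count;

    int main(void)
    {
        local_inc(&open_count);             /* ieee80211_do_open() */
        printf("open_count=%ld\n", local_read(&open_count));
        local_dec(&open_count);             /* ieee80211_do_stop() */
        if (local_read(&open_count) == 0)   /* gates drv_start()/drv_stop() */
            puts("hardware can stop");
        return 0;
    }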
106587diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
106588index b02660f..c0f791c 100644
106589--- a/net/netfilter/Kconfig
106590+++ b/net/netfilter/Kconfig
106591@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
106592
106593 To compile it as a module, choose M here. If unsure, say N.
106594
106595+config NETFILTER_XT_MATCH_GRADM
106596+ tristate '"gradm" match support'
106597+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
106598+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
106599+ ---help---
106600+ The gradm match matches on whether grsecurity RBAC is enabled.
106601+ It is useful when iptables rules are applied early in boot to
106602+ prevent connections to the machine (except from a trusted host)
106603+ while the RBAC system is disabled.
106604+
106605 config NETFILTER_XT_MATCH_HASHLIMIT
106606 tristate '"hashlimit" match support'
106607 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
106608diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
106609index 89f73a9..e4e5bd9 100644
106610--- a/net/netfilter/Makefile
106611+++ b/net/netfilter/Makefile
106612@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
106613 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
106614 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
106615 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
106616+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
106617 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
106618 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
106619 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
106620diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
106621index d259da3..6a32b2c 100644
106622--- a/net/netfilter/ipset/ip_set_core.c
106623+++ b/net/netfilter/ipset/ip_set_core.c
106624@@ -1952,7 +1952,7 @@ done:
106625 return ret;
106626 }
106627
106628-static struct nf_sockopt_ops so_set __read_mostly = {
106629+static struct nf_sockopt_ops so_set = {
106630 .pf = PF_INET,
106631 .get_optmin = SO_IP_SET,
106632 .get_optmax = SO_IP_SET + 1,
106633diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
106634index b0f7b62..0541842 100644
106635--- a/net/netfilter/ipvs/ip_vs_conn.c
106636+++ b/net/netfilter/ipvs/ip_vs_conn.c
106637@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
106638 /* Increase the refcnt counter of the dest */
106639 ip_vs_dest_hold(dest);
106640
106641- conn_flags = atomic_read(&dest->conn_flags);
106642+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
106643 if (cp->protocol != IPPROTO_UDP)
106644 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
106645 flags = cp->flags;
106646@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
106647
106648 cp->control = NULL;
106649 atomic_set(&cp->n_control, 0);
106650- atomic_set(&cp->in_pkts, 0);
106651+ atomic_set_unchecked(&cp->in_pkts, 0);
106652
106653 cp->packet_xmit = NULL;
106654 cp->app = NULL;
106655@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
106656
106657 /* Don't drop the entry if its number of incoming packets is not
106658 located in [0, 8] */
106659- i = atomic_read(&cp->in_pkts);
106660+ i = atomic_read_unchecked(&cp->in_pkts);
106661 if (i > 8 || i < 0) return 0;
106662
106663 if (!todrop_rate[i]) return 0;
106664diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
106665index b87ca32..76c7799 100644
106666--- a/net/netfilter/ipvs/ip_vs_core.c
106667+++ b/net/netfilter/ipvs/ip_vs_core.c
106668@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
106669 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
106670 /* do not touch skb anymore */
106671
106672- atomic_inc(&cp->in_pkts);
106673+ atomic_inc_unchecked(&cp->in_pkts);
106674 ip_vs_conn_put(cp);
106675 return ret;
106676 }
106677@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
106678 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
106679 pkts = sysctl_sync_threshold(ipvs);
106680 else
106681- pkts = atomic_add_return(1, &cp->in_pkts);
106682+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
106683
106684 if (ipvs->sync_state & IP_VS_STATE_MASTER)
106685 ip_vs_sync_conn(net, cp, pkts);
106686diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
106687index ed99448..3ba6cad 100644
106688--- a/net/netfilter/ipvs/ip_vs_ctl.c
106689+++ b/net/netfilter/ipvs/ip_vs_ctl.c
106690@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
106691 */
106692 ip_vs_rs_hash(ipvs, dest);
106693 }
106694- atomic_set(&dest->conn_flags, conn_flags);
106695+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
106696
106697 /* bind the service */
106698 old_svc = rcu_dereference_protected(dest->svc, 1);
106699@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
106700 * align with netns init in ip_vs_control_net_init()
106701 */
106702
106703-static struct ctl_table vs_vars[] = {
106704+static ctl_table_no_const vs_vars[] __read_only = {
106705 {
106706 .procname = "amemthresh",
106707 .maxlen = sizeof(int),
106708@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
106709 " %-7s %-6d %-10d %-10d\n",
106710 &dest->addr.in6,
106711 ntohs(dest->port),
106712- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
106713+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
106714 atomic_read(&dest->weight),
106715 atomic_read(&dest->activeconns),
106716 atomic_read(&dest->inactconns));
106717@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
106718 "%-7s %-6d %-10d %-10d\n",
106719 ntohl(dest->addr.ip),
106720 ntohs(dest->port),
106721- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
106722+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
106723 atomic_read(&dest->weight),
106724 atomic_read(&dest->activeconns),
106725 atomic_read(&dest->inactconns));
106726@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
106727
106728 entry.addr = dest->addr.ip;
106729 entry.port = dest->port;
106730- entry.conn_flags = atomic_read(&dest->conn_flags);
106731+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
106732 entry.weight = atomic_read(&dest->weight);
106733 entry.u_threshold = dest->u_threshold;
106734 entry.l_threshold = dest->l_threshold;
106735@@ -3040,7 +3040,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
106736 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
106737 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
106738 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
106739- (atomic_read(&dest->conn_flags) &
106740+ (atomic_read_unchecked(&dest->conn_flags) &
106741 IP_VS_CONN_F_FWD_MASK)) ||
106742 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
106743 atomic_read(&dest->weight)) ||
106744@@ -3675,7 +3675,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
106745 {
106746 int idx;
106747 struct netns_ipvs *ipvs = net_ipvs(net);
106748- struct ctl_table *tbl;
106749+ ctl_table_no_const *tbl;
106750
106751 atomic_set(&ipvs->dropentry, 0);
106752 spin_lock_init(&ipvs->dropentry_lock);
106753diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
106754index 127f140..553d652 100644
106755--- a/net/netfilter/ipvs/ip_vs_lblc.c
106756+++ b/net/netfilter/ipvs/ip_vs_lblc.c
106757@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
106758 * IPVS LBLC sysctl table
106759 */
106760 #ifdef CONFIG_SYSCTL
106761-static struct ctl_table vs_vars_table[] = {
106762+static ctl_table_no_const vs_vars_table[] __read_only = {
106763 {
106764 .procname = "lblc_expiration",
106765 .data = NULL,
106766diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
106767index 2229d2d..b32b785 100644
106768--- a/net/netfilter/ipvs/ip_vs_lblcr.c
106769+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
106770@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
106771 * IPVS LBLCR sysctl table
106772 */
106773
106774-static struct ctl_table vs_vars_table[] = {
106775+static ctl_table_no_const vs_vars_table[] __read_only = {
106776 {
106777 .procname = "lblcr_expiration",
106778 .data = NULL,
106779diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
106780index d93ceeb..4556144 100644
106781--- a/net/netfilter/ipvs/ip_vs_sync.c
106782+++ b/net/netfilter/ipvs/ip_vs_sync.c
106783@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
106784 cp = cp->control;
106785 if (cp) {
106786 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
106787- pkts = atomic_add_return(1, &cp->in_pkts);
106788+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
106789 else
106790 pkts = sysctl_sync_threshold(ipvs);
106791 ip_vs_sync_conn(net, cp->control, pkts);
106792@@ -771,7 +771,7 @@ control:
106793 if (!cp)
106794 return;
106795 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
106796- pkts = atomic_add_return(1, &cp->in_pkts);
106797+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
106798 else
106799 pkts = sysctl_sync_threshold(ipvs);
106800 goto sloop;
106801@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
106802
106803 if (opt)
106804 memcpy(&cp->in_seq, opt, sizeof(*opt));
106805- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
106806+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
106807 cp->state = state;
106808 cp->old_state = cp->state;
106809 /*
106810diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
106811index 3aedbda..6a63567 100644
106812--- a/net/netfilter/ipvs/ip_vs_xmit.c
106813+++ b/net/netfilter/ipvs/ip_vs_xmit.c
106814@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
106815 else
106816 rc = NF_ACCEPT;
106817 /* do not touch skb anymore */
106818- atomic_inc(&cp->in_pkts);
106819+ atomic_inc_unchecked(&cp->in_pkts);
106820 goto out;
106821 }
106822
106823@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
106824 else
106825 rc = NF_ACCEPT;
106826 /* do not touch skb anymore */
106827- atomic_inc(&cp->in_pkts);
106828+ atomic_inc_unchecked(&cp->in_pkts);
106829 goto out;
106830 }
106831
106832diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
106833index a4b5e2a..13b1de3 100644
106834--- a/net/netfilter/nf_conntrack_acct.c
106835+++ b/net/netfilter/nf_conntrack_acct.c
106836@@ -62,7 +62,8 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
106837 #ifdef CONFIG_SYSCTL
106838 static int nf_conntrack_acct_init_sysctl(struct net *net)
106839 {
106840- struct ctl_table *table;
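+	/* the kmemdup()'d copy below is modified at runtime, hence the non-const type */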
106841+ ctl_table_no_const *table;
106842
106843 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
106844 GFP_KERNEL);
106845diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
106846index 13fad86..18c984c 100644
106847--- a/net/netfilter/nf_conntrack_core.c
106848+++ b/net/netfilter/nf_conntrack_core.c
106849@@ -1733,6 +1733,10 @@ void nf_conntrack_init_end(void)
106850 #define DYING_NULLS_VAL ((1<<30)+1)
106851 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
106852
106853+#ifdef CONFIG_GRKERNSEC_HIDESYM
106854+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
106855+#endif
106856+
106857 int nf_conntrack_init_net(struct net *net)
106858 {
106859 int ret = -ENOMEM;
106860@@ -1758,7 +1762,12 @@ int nf_conntrack_init_net(struct net *net)
106861 if (!net->ct.stat)
106862 goto err_pcpu_lists;
106863
106864+#ifdef CONFIG_GRKERNSEC_HIDESYM
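+	/* avoid leaking the net pointer via the slab name; a counter keeps names unique */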
106865+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
106866+#else
106867 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
106868+#endif
106869 if (!net->ct.slabname)
106870 goto err_slabname;
106871
106872diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
106873index 4e78c57..ec8fb74 100644
106874--- a/net/netfilter/nf_conntrack_ecache.c
106875+++ b/net/netfilter/nf_conntrack_ecache.c
106876@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
106877 #ifdef CONFIG_SYSCTL
106878 static int nf_conntrack_event_init_sysctl(struct net *net)
106879 {
106880- struct ctl_table *table;
106881+ ctl_table_no_const *table;
106882
106883 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
106884 GFP_KERNEL);
106885diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
106886index bd9d315..989947e 100644
106887--- a/net/netfilter/nf_conntrack_helper.c
106888+++ b/net/netfilter/nf_conntrack_helper.c
106889@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
106890
106891 static int nf_conntrack_helper_init_sysctl(struct net *net)
106892 {
106893- struct ctl_table *table;
106894+ ctl_table_no_const *table;
106895
106896 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
106897 GFP_KERNEL);
106898diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
106899index b65d586..beec902 100644
106900--- a/net/netfilter/nf_conntrack_proto.c
106901+++ b/net/netfilter/nf_conntrack_proto.c
106902@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
106903
106904 static void
106905 nf_ct_unregister_sysctl(struct ctl_table_header **header,
106906- struct ctl_table **table,
106907+ ctl_table_no_const **table,
106908 unsigned int users)
106909 {
106910 if (users > 0)
106911diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
106912index fc823fa..8311af3 100644
106913--- a/net/netfilter/nf_conntrack_standalone.c
106914+++ b/net/netfilter/nf_conntrack_standalone.c
106915@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
106916
106917 static int nf_conntrack_standalone_init_sysctl(struct net *net)
106918 {
106919- struct ctl_table *table;
106920+ ctl_table_no_const *table;
106921
106922 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
106923 GFP_KERNEL);
106924diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
106925index 7a394df..bd91a8a 100644
106926--- a/net/netfilter/nf_conntrack_timestamp.c
106927+++ b/net/netfilter/nf_conntrack_timestamp.c
106928@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
106929 #ifdef CONFIG_SYSCTL
106930 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
106931 {
106932- struct ctl_table *table;
106933+ ctl_table_no_const *table;
106934
106935 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
106936 GFP_KERNEL);
106937diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
106938index 675d12c..b36e825 100644
106939--- a/net/netfilter/nf_log.c
106940+++ b/net/netfilter/nf_log.c
106941@@ -386,7 +386,7 @@ static const struct file_operations nflog_file_ops = {
106942
106943 #ifdef CONFIG_SYSCTL
106944 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
106945-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
106946+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
106947
106948 static int nf_log_proc_dostring(struct ctl_table *table, int write,
106949 void __user *buffer, size_t *lenp, loff_t *ppos)
106950@@ -417,13 +417,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
106951 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
106952 mutex_unlock(&nf_log_mutex);
106953 } else {
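+		/* the sysctl table is now read-only: point a stack copy at the logger name */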
106954+ ctl_table_no_const nf_log_table = *table;
106955+
106956 mutex_lock(&nf_log_mutex);
106957 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
106958 if (!logger)
106959- table->data = "NONE";
106960+ nf_log_table.data = "NONE";
106961 else
106962- table->data = logger->name;
106963- r = proc_dostring(table, write, buffer, lenp, ppos);
106964+ nf_log_table.data = logger->name;
106965+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
106966 mutex_unlock(&nf_log_mutex);
106967 }
106968
106969diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
106970index c68c1e5..8b5d670 100644
106971--- a/net/netfilter/nf_sockopt.c
106972+++ b/net/netfilter/nf_sockopt.c
106973@@ -43,7 +43,8 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
106974 }
106975 }
106976
106977- list_add(&reg->list, &nf_sockopts);
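+	/* nf_sockopt_ops is constified; pax_list_add() opens a brief write window to link it */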
106978+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
106979 out:
106980 mutex_unlock(&nf_sockopt_mutex);
106981 return ret;
106982@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
106983 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
106984 {
106985 mutex_lock(&nf_sockopt_mutex);
106986- list_del(&reg->list);
106987+ pax_list_del((struct list_head *)&reg->list);
106988 mutex_unlock(&nf_sockopt_mutex);
106989 }
106990 EXPORT_SYMBOL(nf_unregister_sockopt);
106991diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
106992index 11d85b3..7fcc420 100644
106993--- a/net/netfilter/nfnetlink_log.c
106994+++ b/net/netfilter/nfnetlink_log.c
106995@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
106996 struct nfnl_log_net {
106997 spinlock_t instances_lock;
106998 struct hlist_head instance_table[INSTANCE_BUCKETS];
106999- atomic_t global_seq;
107000+ atomic_unchecked_t global_seq;
107001 };
107002
107003 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
107004@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
107005 /* global sequence number */
107006 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
107007 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
107008- htonl(atomic_inc_return(&log->global_seq))))
107009+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
107010 goto nla_put_failure;
107011
107012 if (data_len) {
107013diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
107014index 65f3e2b..2e9d6a0 100644
107015--- a/net/netfilter/nft_compat.c
107016+++ b/net/netfilter/nft_compat.c
107017@@ -317,14 +317,7 @@ static void nft_match_eval(const struct nft_expr *expr,
107018 return;
107019 }
107020
107021- switch(ret) {
107022- case true:
107023- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
107024- break;
107025- case false:
107026- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
107027- break;
107028- }
107029+ data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
107030 }
107031
107032 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
107033diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
107034new file mode 100644
107035index 0000000..c566332
107036--- /dev/null
107037+++ b/net/netfilter/xt_gradm.c
107038@@ -0,0 +1,51 @@
107039+/*
107040+ * gradm match for netfilter
107041